source
stringlengths
3
92
c
stringlengths
26
2.25M
simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized void xxx(int argc) { int x; // expected-note {{initialize the variable 'x' to silence this warning}} #pragma omp simd for (int i = 0; i < 10; ++i) argc = x; // expected-warning {{variable 'x' is uninitialized when used here}} } // expected-error@+1 {{unexpected OpenMP directive '#pragma omp simd'}} #pragma omp simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp simd'}} #pragma omp simd foo // expected-error@+1 {{unexpected OpenMP directive '#pragma omp simd'}} #pragma omp simd safelen(4) void test_no_clause() { int i; #pragma omp simd for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp simd' must be a for loop}} #pragma omp simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; // expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}} #pragma omp simd foo bar for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; // expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}} #pragma omp simd; for (i = 0; i < 16; ++i) ; // expected-error@+2 {{unexpected OpenMP clause 'firstprivate' in directive '#pragma omp simd'}} // expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}} #pragma omp simd firstprivate(x); for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}} #pragma omp simd private(x); for (i = 0; i < 16; ++i) ; // 
expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}} #pragma omp simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_safelen() { int i; // expected-error@+1 {{expected '('}} #pragma omp simd safelen for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd safelen( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd safelen() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd safelen(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd safelen(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp simd safelen 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd safelen(4 for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd safelen(4, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd safelen(4, ) for (i = 0; i < 16; ++i) ; // xxpected-error@+1 {{expected expression}} #pragma omp simd safelen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd safelen(4 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd safelen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp simd safelen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} 
#pragma omp simd safelen(4, 8) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp simd safelen(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp simd safelen(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp simd safelen(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp simd safelen(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp simd safelen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_simdlen() { int i; // expected-error@+1 {{expected '('}} #pragma omp simd simdlen for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd simdlen( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd simdlen() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp simd simdlen 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(4 for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(4, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected 
')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp simd simdlen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(4 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp simd simdlen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(4, 8) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp simd simdlen(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp simd simdlen(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp simd simdlen(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp simd simdlen(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp simd simdlen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_safelen_simdlen() { int i; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp simd simdlen(6) safelen(5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp simd safelen(5) simdlen(6) for (i = 0; i < 16; ++i) ; } void test_collapse() { int i; // expected-error@+1 {{expected '('}} #pragma omp simd collapse for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to 
match this '('}} #pragma omp simd collapse( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd collapse() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd collapse(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd collapse(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp simd collapse 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}} // xxpected-error@+1 {{expected expression}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 
for loops after '#pragma omp simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}} #pragma omp simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}} // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp simd collapse(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp simd collapse(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp simd collapse(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp simd collapse(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; // expected-note@+2 2 {{defined as reduction}} #pragma omp parallel #pragma omp simd collapse(2) reduction(+ : i) for (i = 0; i < 16; ++i) // expected-error {{loop iteration variable in the associated loop of 'omp simd' directive may not be reduction, predetermined as lastprivate}} // expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}} for (int j = 0; j < 
16; ++j) // expected-error@+2 2 {{reduction variable must be shared}} // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp for reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; #pragma omp parallel #pragma omp for for (i = 0; i < 16; ++i) for (int j = 0; j < 16; ++j) #pragma omp simd reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_linear() { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd linear( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd linear(, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp simd linear(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd linear() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd linear(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp simd linear(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp simd linear(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp simd linear(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp simd linear(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // expected-error@+1 {{expected expression}} #pragma omp simd linear(x :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this 
'('}} #pragma omp simd linear(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp simd linear(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp simd linear(x : 2 * 2) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd linear(x : 1, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd linear(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be linear}} #pragma omp simd linear(x) linear(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as private}} // expected-error@+1 {{private variable cannot be linear}} #pragma omp simd private(x) linear(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be private}} #pragma omp simd linear(x) private(x) for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}} #pragma omp simd linear(x, y : 0) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be lastprivate}} #pragma omp simd linear(x) lastprivate(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as lastprivate}} // expected-error@+1 {{lastprivate variable cannot be linear}} #pragma omp simd lastprivate(x) linear(x) for (i = 0; i < 16; ++i) ; } void test_aligned() { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd aligned( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd aligned(, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma 
omp simd aligned(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd aligned() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd aligned(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp simd aligned(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp simd aligned(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp simd aligned(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; int *x, y, z[25]; // expected-note 4 {{'y' defined here}} #pragma omp simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp simd aligned(z) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd aligned(x :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd aligned(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp simd aligned(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp simd aligned(x : 2 * 2) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd aligned(x : 1, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd aligned(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp simd aligned(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp simd aligned(x, y, 
z) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as aligned}} // expected-error@+1 {{a variable cannot appear in more than one aligned clause}} #pragma omp simd aligned(x) aligned(z, x) for (i = 0; i < 16; ++i) ; // expected-note@+3 {{defined as aligned}} // expected-error@+2 {{a variable cannot appear in more than one aligned clause}} // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp simd aligned(x, y, z) aligned(y, z) for (i = 0; i < 16; ++i) ; } void test_private() { int i; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd private( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp simd private(, for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{expected expression}} #pragma omp simd private(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd private() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd private(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_firstprivate() { int i; // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{unexpected OpenMP clause 'firstprivate' in directive '#pragma omp simd'}} // expected-error@+1 {{expected expression}} #pragma omp simd firstprivate( for (i = 0; i < 16; ++i) ; } void test_lastprivate() { int i; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp simd 
lastprivate( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp simd lastprivate(, for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{expected expression}} #pragma omp simd lastprivate(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd lastprivate() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd lastprivate(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_reduction() { int i, x, y; // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp simd reduction( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp simd reduction() for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp simd reduction(x) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected identifier}} #pragma omp simd reduction( : x) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp simd reduction(, for (i = 0; i < 16; ++i) ; // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected expression}} // expected-warning@+1 
{{missing ':' after reduction identifier - ignoring}} #pragma omp simd reduction(+ for (i = 0; i < 16; ++i) ; // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // // expected-error@+1 {{expected expression}} #pragma omp simd reduction(+: for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd reduction(+ :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd reduction(+ :, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd reduction(+ : x, + : y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected identifier}} #pragma omp simd reduction(% : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(+ : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(* : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(- : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(& : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(| : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(^ : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(&& : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(|| : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(max : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(min : x) for (i = 0; i < 16; ++i) ; struct X { int x; }; struct X X; // expected-error@+1 {{expected variable name}} #pragma omp simd reduction(+ : X.x) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp simd reduction(+ : x + x) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } } void 
linear_modifiers(int argc) { int f; #pragma omp simd linear(f) for (int k = 0; k < argc; ++k) ++k; #pragma omp simd linear(val(f)) for (int k = 0; k < argc; ++k) ++k; #pragma omp simd linear(uval(f)) // expected-error {{expected 'val' modifier}} for (int k = 0; k < argc; ++k) ++k; #pragma omp simd linear(ref(f)) // expected-error {{expected 'val' modifier}} for (int k = 0; k < argc; ++k) ++k; #pragma omp simd linear(foo(f)) // expected-error {{expected 'val' modifier}} for (int k = 0; k < argc; ++k) ++k; }
convolution_3x3_int8.h
// BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Naive 3x3 stride-1 int8 convolution: for each output channel, accumulate the
// 9-tap dot product of every input channel into an int32 output map.
// Assumes top_blob is already sized to the valid output (no padding here) and
// that _kernel holds outch*inch*9 signed 8-bit weights — TODO confirm with caller.
static void conv3x3s1_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char* kernel = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);
        out0.fill(0);

        // 9 weights per (output channel, input channel) pair
        const signed char* kernel0 = (const signed char*)kernel + p * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            int* outptr0 = out0;

            const signed char* img0 = bottom_blob.channel(q);

            // three consecutive input rows feeding one output row
            const signed char* r0 = img0;
            const signed char* r1 = img0 + w;
            const signed char* r2 = img0 + w * 2;

            for (int i = 0; i < outh; i++)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    int sum0 = 0;

                    sum0 += (int)r0[0] * kernel0[0];
                    sum0 += (int)r0[1] * kernel0[1];
                    sum0 += (int)r0[2] * kernel0[2];
                    sum0 += (int)r1[0] * kernel0[3];
                    sum0 += (int)r1[1] * kernel0[4];
                    sum0 += (int)r1[2] * kernel0[5];
                    sum0 += (int)r2[0] * kernel0[6];
                    sum0 += (int)r2[1] * kernel0[7];
                    sum0 += (int)r2[2] * kernel0[8];

                    *outptr0 += sum0;

                    r0++;
                    r1++;
                    r2++;
                    outptr0++;
                }

                // skip the 2-pixel right margin of the 3x3 window
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }

            kernel0 += 9;
        }
    }
}

// Winograd F(2,3) kernel transform: U = G * g * G^T per (outch, inch) pair.
// ktm is 2*G scaled to integers, hence the >>2 compensation in the output
// transform of conv3x3s1_winograd23_int8_sse.
static void conv3x3s1_winograd23_transform_kernel_int8_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch, const Option& opt)
{
    kernel_tm.create(4 * 4, inch, outch, (size_t)2u);

    // G (scaled by 2 to stay integral)
    const short ktm[4][3] = {
        {2, 0, 0},
        {1, 1, 1},
        {1, -1, 1},
        {0, 0, 2}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // transform kernel
            const signed char* k0 = kernel0;
            const signed char* k1 = kernel0 + 3;
            const signed char* k2 = kernel0 + 6;

            // h = G * g  (one 3-tap row per kernel row)
            short tmp[4][3];
            for (int i = 0; i < 4; i++)
            {
                tmp[i][0] = (short)k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = (short)k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = (short)k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U = h * G^T
            for (int j = 0; j < 4; j++)
            {
                short* tmpp = &tmp[j][0];

                for (int i = 0; i < 4; i++)
                {
                    kernel_tm0[j * 4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }
}

// Winograd F(2,3) int8 convolution: input transform (B^T d B), element-wise
// dot across input channels in the transform domain, then output transform
// (A^T m A) with a >>2 to undo the 2x kernel scaling.
static void conv3x3s1_winograd23_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 2n+2, winograd F(2,3)
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 1) / 2 * 2;
    outh = (outh + 1) / 2 * 2;

    w = outw + 2;
    h = outh + 2;

    Option opt_b = opt;
    opt_b.blob_allocator = opt.workspace_allocator;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 4;

        const int tiles = nColBlocks * nRowBlocks;

        bottom_blob_tm.create(4 * 4, tiles, inch, 2u, opt.workspace_allocator);

        // BT
        // const float itm[4][4] = {
        //     {1.0f,  0.0f, -1.0f, 0.0f},
        //     {0.0f,  1.0f,  1.0f, 0.0f},
        //     {0.0f, -1.0f,  1.0f, 0.0f},
        //     {0.0f, -1.0f,  0.0f, 1.0f}
        // };

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const signed char* img = bottom_blob_bordered.channel(q);
            short* out_tm0 = bottom_blob_tm.channel(q);

            for (int j = 0; j < nColBlocks; j++)
            {
                // 4 input rows per tile row; tiles overlap by 2 (stride 2)
                const signed char* r0 = img + w * j * 2;
                const signed char* r1 = r0 + w;
                const signed char* r2 = r1 + w;
                const signed char* r3 = r2 + w;

                for (int i = 0; i < nRowBlocks; i++)
                {
                    short d0[4], d1[4], d2[4], d3[4];
                    short w0[4], w1[4], w2[4], w3[4];
                    short t0[4], t1[4], t2[4], t3[4];

                    // load
                    for (int n = 0; n < 4; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                    }
                    // w = B_t * d
                    for (int n = 0; n < 4; n++)
                    {
                        w0[n] = d0[n] - d2[n];
                        w1[n] = d1[n] + d2[n];
                        w2[n] = d2[n] - d1[n];
                        w3[n] = d3[n] - d1[n];
                    }
                    // transpose d to d_t
                    {
                        t0[0] = w0[0]; t1[0] = w0[1]; t2[0] = w0[2]; t3[0] = w0[3];
                        t0[1] = w1[0]; t1[1] = w1[1]; t2[1] = w1[2]; t3[1] = w1[3];
                        t0[2] = w2[0]; t1[2] = w2[1]; t2[2] = w2[2]; t3[2] = w2[3];
                        t0[3] = w3[0]; t1[3] = w3[1]; t2[3] = w3[2]; t3[3] = w3[3];
                    }
                    // U = B_t * d_t  (second 1-D pass completes the 2-D transform)
                    for (int n = 0; n < 4; n++)
                    {
                        d0[n] = t0[n] - t2[n];
                        d1[n] = t1[n] + t2[n];
                        d2[n] = t2[n] - t1[n];
                        d3[n] = t3[n] - t1[n];
                    }
                    // save to out_tm
                    for (int n = 0; n < 4; n++)
                    {
                        out_tm0[n] = d0[n];
                        out_tm0[n + 4] = d1[n];
                        out_tm0[n + 8] = d2[n];
                        out_tm0[n + 12] = d3[n];
                    }

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;

                    out_tm0 += 16;
                }
            }
        }
    }
    bottom_blob_bordered = Mat();

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 4;

        const int tiles = nColBlocks * nRowBlocks;

        top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);

        // process 4 output channels at a time, then the remainder
        int nn_outch = outch >> 2;
        int remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int p = pp * 4;

            Mat out0_tm = top_blob_tm.channel(p);
            Mat out1_tm = top_blob_tm.channel(p + 1);
            Mat out2_tm = top_blob_tm.channel(p + 2);
            Mat out3_tm = top_blob_tm.channel(p + 3);

            const Mat kernel0_tm = kernel_tm.channel(p);
            const Mat kernel1_tm = kernel_tm.channel(p + 1);
            const Mat kernel2_tm = kernel_tm.channel(p + 2);
            const Mat kernel3_tm = kernel_tm.channel(p + 3);

            for (int i = 0; i < tiles; i++)
            {
                int* output0_tm = out0_tm.row<int>(i);
                int* output1_tm = out1_tm.row<int>(i);
                int* output2_tm = out2_tm.row<int>(i);
                int* output3_tm = out3_tm.row<int>(i);

                int sum0[16] = {0};
                int sum1[16] = {0};
                int sum2[16] = {0};
                int sum3[16] = {0};

                int q = 0;
                // unroll input channels by 4; each kernel row is 16 shorts,
                // so k += 16 steps to the next input channel's row, and
                // k -= 16*3 rewinds for the next n
                for (; q + 3 < inch; q += 4)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
                    const short* r1 = bottom_blob_tm.channel(q + 1).row<short>(i);
                    const short* r2 = bottom_blob_tm.channel(q + 2).row<short>(i);
                    const short* r3 = bottom_blob_tm.channel(q + 3).row<short>(i);

                    const short* k0 = kernel0_tm.row<short>(q);
                    const short* k1 = kernel1_tm.row<short>(q);
                    const short* k2 = kernel2_tm.row<short>(q);
                    const short* k3 = kernel3_tm.row<short>(q);

                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                        k0 += 16;
                        sum0[n] += (int)r1[n] * k0[n];
                        k0 += 16;
                        sum0[n] += (int)r2[n] * k0[n];
                        k0 += 16;
                        sum0[n] += (int)r3[n] * k0[n];
                        k0 -= 16 * 3;

                        sum1[n] += (int)r0[n] * k1[n];
                        k1 += 16;
                        sum1[n] += (int)r1[n] * k1[n];
                        k1 += 16;
                        sum1[n] += (int)r2[n] * k1[n];
                        k1 += 16;
                        sum1[n] += (int)r3[n] * k1[n];
                        k1 -= 16 * 3;

                        sum2[n] += (int)r0[n] * k2[n];
                        k2 += 16;
                        sum2[n] += (int)r1[n] * k2[n];
                        k2 += 16;
                        sum2[n] += (int)r2[n] * k2[n];
                        k2 += 16;
                        sum2[n] += (int)r3[n] * k2[n];
                        k2 -= 16 * 3;

                        sum3[n] += (int)r0[n] * k3[n];
                        k3 += 16;
                        sum3[n] += (int)r1[n] * k3[n];
                        k3 += 16;
                        sum3[n] += (int)r2[n] * k3[n];
                        k3 += 16;
                        sum3[n] += (int)r3[n] * k3[n];
                        k3 -= 16 * 3;
                    }
                }

                for (; q < inch; q++)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);

                    const short* k0 = kernel0_tm.row<short>(q);
                    const short* k1 = kernel1_tm.row<short>(q);
                    const short* k2 = kernel2_tm.row<short>(q);
                    const short* k3 = kernel3_tm.row<short>(q);

                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                        sum1[n] += (int)r0[n] * k1[n];
                        sum2[n] += (int)r0[n] * k2[n];
                        sum3[n] += (int)r0[n] * k3[n];
                    }
                }

                for (int n = 0; n < 16; n++)
                {
                    output0_tm[n] = sum0[n];
                    output1_tm[n] = sum1[n];
                    output2_tm[n] = sum2[n];
                    output3_tm[n] = sum3[n];
                }
            }
        }

        // remaining output channels, one at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = remain_outch_start; p < outch; p++)
        {
            Mat out0_tm = top_blob_tm.channel(p);
            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int i = 0; i < tiles; i++)
            {
                int* output0_tm = out0_tm.row<int>(i);

                int sum0[16] = {0};

                int q = 0;
                for (; q + 3 < inch; q += 4)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
                    const short* r1 = bottom_blob_tm.channel(q + 1).row<short>(i);
                    const short* r2 = bottom_blob_tm.channel(q + 2).row<short>(i);
                    const short* r3 = bottom_blob_tm.channel(q + 3).row<short>(i);

                    const short* k0 = kernel0_tm.row<short>(q);
                    const short* k1 = kernel0_tm.row<short>(q + 1);
                    const short* k2 = kernel0_tm.row<short>(q + 2);
                    const short* k3 = kernel0_tm.row<short>(q + 3);

                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                        sum0[n] += (int)r1[n] * k1[n];
                        sum0[n] += (int)r2[n] * k2[n];
                        sum0[n] += (int)r3[n] * k3[n];
                    }
                }

                for (; q < inch; q++)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
                    const short* k0 = kernel0_tm.row<short>(q);

                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                    }
                }

                for (int n = 0; n < 16; n++)
                {
                    output0_tm[n] = sum0[n];
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
    {
        // AT
        // const float itm[2][4] = {
        //     {1.0f, 1.0f,  1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 1.0f}
        // };

        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 4;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            Mat out_tm = top_blob_tm.channel(p);
            Mat out = top_blob_bordered.channel(p);

            for (int j = 0; j < nColBlocks; j++)
            {
                int* outRow0 = out.row<int>(j * 2);
                int* outRow1 = out.row<int>(j * 2 + 1);

                for (int i = 0; i < nRowBlocks; i++)
                {
                    int* out_tile = out_tm.row<int>(j * nRowBlocks + i);

                    int s0[4], s1[4], s2[4], s3[4];
                    int w0[4], w1[4];
                    int d0[2], d1[2], d2[2], d3[2];
                    int o0[2], o1[2];

                    // load
                    for (int n = 0; n < 4; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n + 4];
                        s2[n] = out_tile[n + 8];
                        s3[n] = out_tile[n + 12];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 4; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n];
                        w1[n] = s1[n] - s2[n] + s3[n];
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0]; d0[1] = w1[0];
                        d1[0] = w0[1]; d1[1] = w1[1];
                        d2[0] = w0[2]; d2[1] = w1[2];
                        d3[0] = w0[3]; d3[1] = w1[3];
                    }
                    // Y = A_T * w_t
                    for (int n = 0; n < 2; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n];
                        o1[n] = d1[n] - d2[n] + d3[n];
                    }
                    // save to top blob; right-shift by 2 because the kernel
                    // transform used G' = 2*G (applied twice => factor 4)
                    outRow0[0] = o0[0] >> 2;
                    outRow0[1] = o0[1] >> 2;
                    outRow1[0] = o1[0] >> 2;
                    outRow1[1] = o1[1] >> 2;

                    outRow0 += 2;
                    outRow1 += 2;
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}

// Winograd F(4,3) kernel transform: U = G * g * G^T using an integer-scaled G
// (original float G commented below). The combined scale is divided back out
// (the /576 in the output transform of conv3x3s1_winograd43_int8_sse).
static void conv3x3s1_winograd43_transform_kernel_int8_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch, const Option& opt)
{
    kernel_tm.create(6 * 6, inch, outch, (size_t)2u);

    // G
    // const float ktm[6][3] = {
    //     {  1.0f/4,     0.0f,    0.0f},
    //     { -1.0f/6,  -1.0f/6, -1.0f/6},
    //     { -1.0f/6,   1.0f/6, -1.0f/6},
    //     { 1.0f/24,  1.0f/12,  1.0f/6},
    //     { 1.0f/24, -1.0f/12,  1.0f/6},
    //     {    0.0f,     0.0f,    1.0f}
    // };
    const short ktm[6][3] = {
        {6, 0, 0},
        {-4, -4, -4},
        {-4, 4, -4},
        {1, 2, 4},
        {1, -2, 4},
        {0, 0, 24}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // transform kernel
            const signed char* k0 = kernel0;
            const signed char* k1 = kernel0 + 3;
            const signed char* k2 = kernel0 + 6;

            // h = G * g
            short tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U = h * G^T
            for (int j = 0; j < 6; j++)
            {
                short* tmpp = &tmp[j][0];

                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }
}

// Winograd F(4,3) int8 convolution: 6x6 input tiles with stride 4, transform-
// domain dot product per tile, 4x4 output tiles; /576 undoes the 24^2 scaling
// of the integer kernel transform (24 = lcm of the F(4,3) G denominators).
static void conv3x3s1_winograd43_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2, winograd F(4,3)
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;

    w = outw + 2;
    h = outh + 2;

    Option opt_b = opt;
    opt_b.blob_allocator = opt.workspace_allocator;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;

        const int tiles = nColBlocks * nRowBlocks;

        bottom_blob_tm.create(6 * 6, tiles, inch, 2u, opt.workspace_allocator);

        // BT  (note: 6x6, the float reference below)
        // const float itm[6][6] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f,  0.0f,-5.0f, 0.0f, 1.0f}
        // };

        // 0 =  4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r03 + r04
        // 2 =  4 * (r01 - r02) - r03 + r04
        // 3 = -2 * r01 - r02 + 2 * r03 + r04
        // 4 =  2 * r01 - r02 - 2 * r03 + r04
        // 5 =  4 * r01 - 5 * r03 + r05

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const signed char* img = bottom_blob_bordered.channel(q);
            short* out_tm0 = bottom_blob_tm.channel(q);

            for (int j = 0; j < nColBlocks; j++)
            {
                // 6 input rows per tile row; tiles advance by 4 rows
                const signed char* r0 = img + w * j * 4;
                const signed char* r1 = r0 + w;
                const signed char* r2 = r1 + w;
                const signed char* r3 = r2 + w;
                const signed char* r4 = r3 + w;
                const signed char* r5 = r4 + w;

                for (int i = 0; i < nRowBlocks; i++)
                {
                    short d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
                    short w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
                    short t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];

                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                        d4[n] = r4[n];
                        d5[n] = r5[n];
                    }
                    // w = B_t * d
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
                        w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
                        w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
                        w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
                        w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
                        w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
                    }
                    // transpose d to d_t
                    {
                        t0[0] = w0[0]; t1[0] = w0[1]; t2[0] = w0[2]; t3[0] = w0[3]; t4[0] = w0[4]; t5[0] = w0[5];
                        t0[1] = w1[0]; t1[1] = w1[1]; t2[1] = w1[2]; t3[1] = w1[3]; t4[1] = w1[4]; t5[1] = w1[5];
                        t0[2] = w2[0]; t1[2] = w2[1]; t2[2] = w2[2]; t3[2] = w2[3]; t4[2] = w2[4]; t5[2] = w2[5];
                        t0[3] = w3[0]; t1[3] = w3[1]; t2[3] = w3[2]; t3[3] = w3[3]; t4[3] = w3[4]; t5[3] = w3[5];
                        t0[4] = w4[0]; t1[4] = w4[1]; t2[4] = w4[2]; t3[4] = w4[3]; t4[4] = w4[4]; t5[4] = w4[5];
                        t0[5] = w5[0]; t1[5] = w5[1]; t2[5] = w5[2]; t3[5] = w5[3]; t4[5] = w5[4]; t5[5] = w5[5];
                    }
                    // d = B_t * d_t
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
                        d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
                        d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
                        d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
                        d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
                        d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
                    }
                    // save to out_tm
                    for (int n = 0; n < 6; n++)
                    {
                        out_tm0[n] = d0[n];
                        out_tm0[n + 6] = d1[n];
                        out_tm0[n + 12] = d2[n];
                        out_tm0[n + 18] = d3[n];
                        out_tm0[n + 24] = d4[n];
                        out_tm0[n + 30] = d5[n];
                    }

                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    r5 += 4;

                    out_tm0 += 36;
                }
            }
        }
    }
    bottom_blob_bordered = Mat();

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;

        const int tiles = nColBlocks * nRowBlocks;

        top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            Mat out0_tm = top_blob_tm.channel(p);
            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int i = 0; i < tiles; i++)
            {
                int* output0_tm = out0_tm.row<int>(i);

                int sum0[36] = {0};

                for (int q = 0; q < inch; q++)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
                    const short* k0 = kernel0_tm.row<short>(q);

                    for (int n = 0; n < 36; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                    }
                }

                for (int n = 0; n < 36; n++)
                {
                    output0_tm[n] = sum0[n];
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
    {
        // AT
        // const float itm[4][6] = {
        //     {1.0f, 1.0f,  1.0f, 1.0f,  1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
        //     {0.0f, 1.0f,  1.0f, 4.0f,  4.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
        // };

        // 0 = r00 + r01 + r02 + r03 + r04
        // 1 = r01 - r02 + 2 * (r03 - r04)
        // 2 = r01 + r02 + 4 * (r03 + r04)
        // 3 = r01 - r02 + 8 * (r03 - r04) + r05

        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            Mat out_tm = top_blob_tm.channel(p);
            Mat out = top_blob_bordered.channel(p);

            for (int j = 0; j < nColBlocks; j++)
            {
                int* outRow0 = out.row<int>(j * 4);
                int* outRow1 = out.row<int>(j * 4 + 1);
                int* outRow2 = out.row<int>(j * 4 + 2);
                int* outRow3 = out.row<int>(j * 4 + 3);

                for (int i = 0; i < nRowBlocks; i++)
                {
                    int* out_tile = out_tm.row<int>(j * nRowBlocks + i);

                    int s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
                    int w0[6], w1[6], w2[6], w3[6];
                    int d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
                    int o0[4], o1[4], o2[4], o3[4];

                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n + 6];
                        s2[n] = out_tile[n + 12];
                        s3[n] = out_tile[n + 18];
                        s4[n] = out_tile[n + 24];
                        s5[n] = out_tile[n + 30];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
                        w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
                        w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
                        w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n];
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0];
                        d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1];
                        d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2];
                        d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3];
                        d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4];
                        d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5];
                    }
                    // Y = A_T * w_t
                    for (int n = 0; n < 4; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
                        o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
                        o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
                        o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
                    }
                    // save to top blob; /576 = /(24*24) undoes the integer
                    // kernel-transform scaling
                    for (int n = 0; n < 4; n++)
                    {
                        outRow0[n] = o0[n] / 576;
                        outRow1[n] = o1[n] / 576;
                        outRow2[n] = o2[n] / 576;
                        outRow3[n] = o3[n] / 576;
                    }

                    outRow0 += 4;
                    outRow1 += 4;
                    outRow2 += 4;
                    outRow3 += 4;
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}

// Naive 3x3 stride-2 int8 convolution; same scheme as conv3x3s1_int8_sse but
// the input pointers advance 2 per output pixel, and tailstep skips the row
// consumed by the vertical stride.
static void conv3x3s2_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // remaining input pixels after a row, plus one full extra row (stride 2)
    const int tailstep = w - 2 * outw + w;

    const signed char* kernel = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);
        out0.fill(0);

        const signed char* kernel0 = (const signed char*)kernel + p * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            int* outptr0 = out0;

            const signed char* img0 = bottom_blob.channel(q);

            const signed char* r0 = img0;
            const signed char* r1 = img0 + w;
            const signed char* r2 = img0 + w * 2;

            for (int i = 0; i < outh; i++)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    int sum0 = 0;

                    sum0 += (int)r0[0] * kernel0[0];
                    sum0 += (int)r0[1] * kernel0[1];
                    sum0 += (int)r0[2] * kernel0[2];
                    sum0 += (int)r1[0] * kernel0[3];
                    sum0 += (int)r1[1] * kernel0[4];
                    sum0 += (int)r1[2] * kernel0[5];
                    sum0 += (int)r2[0] * kernel0[6];
                    sum0 += (int)r2[1] * kernel0[7];
                    sum0 += (int)r2[2] * kernel0[8];

                    *outptr0 += sum0;

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr0++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }

            kernel0 += 9;
        }
    }
}
attribute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % John Cristy % % October 2002 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/client.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colormap-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/deprecate.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/effect.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/identify.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/magick.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/segment.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/transform.h" #include "magick/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageBoundingBox() returns the bounding box of an image canvas. 
%
%  The format of the GetImageBoundingBox method is:
%
%      RectangleInfo GetImageBoundingBox(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o bounds: Method GetImageBoundingBox returns the bounding box of an
%      image canvas.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickPixelPacket
    target[3],
    zero;

  RectangleInfo
    bounds;

  register const PixelPacket
    *p;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Start with an "inverted" box (x/y at far edge, zero extent) so the scan
    below can only shrink x/y and grow width/height.
  */
  bounds.width=0;
  bounds.height=0;
  bounds.x=(ssize_t) image->columns;
  bounds.y=(ssize_t) image->rows;
  /*
    Sample three reference pixels: top-left (target[0]), top-right
    (target[1]) and bottom-left (target[2]); pixels similar to these are
    treated as background on the corresponding edges.
  */
  GetMagickPixelPacket(image,&target[0]);
  image_view=AcquireVirtualCacheView(image,exception);
  p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
  if (p == (const PixelPacket *) NULL)
    {
      image_view=DestroyCacheView(image_view);
      return(bounds);
    }
  SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
    &target[0]);
  GetMagickPixelPacket(image,&target[1]);
  p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
    exception);
  SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
    &target[1]);
  GetMagickPixelPacket(image,&target[2]);
  p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
    exception);
  SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
    &target[2]);
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    RectangleInfo
      bounding_box;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* snapshot the shared bounds under the same critical section used below */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    bounding_box=bounds;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if ((x < bounding_box.x) &&
          (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
        bounding_box.x=x;
      if ((x > (ssize_t) bounding_box.width) &&
          (IsMagickColorSimilar(&pixel,&target[1]) == MagickFalse))
        bounding_box.width=(size_t) x;
      if ((y < bounding_box.y) &&
          (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
        bounding_box.y=y;
      if ((y > (ssize_t) bounding_box.height) &&
          (IsMagickColorSimilar(&pixel,&target[2]) == MagickFalse))
        bounding_box.height=(size_t) y;
      p++;
    }
    /* merge this row's box into the shared result */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    {
      if (bounding_box.x < bounds.x)
        bounds.x=bounding_box.x;
      if (bounding_box.y < bounds.y)
        bounds.y=bounding_box.y;
      if (bounding_box.width > bounds.width)
        bounds.width=bounding_box.width;
      if (bounding_box.height > bounds.height)
        bounds.height=bounding_box.height;
    }
  }
  image_view=DestroyCacheView(image_view);
  if ((bounds.width == 0) || (bounds.height == 0))
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
      "GeometryDoesNotContainImage","`%s'",image->filename);
  else
    {
      /*
        Convert max column/row back to extents (inclusive, hence the -1).
        NOTE(review): if no non-background pixel was found, bounds.x may still
        equal image->columns here and the subtraction underflows width —
        confirm callers only use bounds after a non-empty result.
      */
      bounds.width-=(bounds.x-1);
      bounds.height-=(bounds.y-1);
    }
  return(bounds);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C h a n n e l D e p t h                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageChannelDepth() returns the depth of a particular image channel.
%
%  The format of the GetImageChannelDepth method is:
%
%      size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
%      size_t GetImageChannelDepth(const Image *image,
%        const ChannelType channel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Depth over all composite channels; delegates to GetImageChannelDepth(). */
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
  return(GetImageChannelDepth(image,CompositeChannels,exception));
}

MagickExport size_t GetImageChannelDepth(const Image *image,
  const ChannelType channel,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    id;

  size_t
    *current_depth,
    depth,
    number_threads;

  ssize_t
    y;

  /*
    Compute image depth.  One running depth per OpenMP thread; the maximum
    over all threads is the answer.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  current_depth=(size_t *) AcquireQuantumMemory(number_threads,
    sizeof(*current_depth));
  if (current_depth == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  status=MagickTrue;
  for (id=0; id < (ssize_t) number_threads; id++)
    current_depth[id]=1;
  if ((image->storage_class == PseudoClass) && (image->matte == MagickFalse))
    {
      /*
        Palette image without alpha: only the colormap entries need scanning.
      */
      register ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        const int
          id = GetOpenMPThreadId();

        /* grow the depth until the colormap entry round-trips losslessly */
        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          MagickStatusType
            status;  /* shadows outer status: non-zero while lossy */

          QuantumAny
            range;

          status=0;
          range=GetQuantumRange(current_depth[id]);
          if ((channel & RedChannel) != 0)
            status&=image->colormap[i].red != ScaleAnyToQuantum(
              ScaleQuantumToAny(image->colormap[i].red,range),range);
          if ((channel & GreenChannel) != 0)
            status&=image->colormap[i].green != ScaleAnyToQuantum(
              ScaleQuantumToAny(image->colormap[i].green,range),range);
          if ((channel & BlueChannel) != 0)
            status&=image->colormap[i].blue != ScaleAnyToQuantum(
              ScaleQuantumToAny(image->colormap[i].blue,range),range);
          if (status == 0)
            break;
          current_depth[id]++;
        }
      }
      depth=current_depth[0];
      for (id=1; id < (ssize_t) number_threads; id++)
        if (depth < current_depth[id])
          depth=current_depth[id];
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
  image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if (QuantumRange <= MaxMap)
    {
      register ssize_t
        i;

      size_t
        *depth_map;

      /*
        Scale pixels to desired (optimized with depth map): precompute, for
        every possible quantum value, the smallest depth that preserves it.
      */
      depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (size_t *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        unsigned int
          depth;

        for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++)
        {
          Quantum
            pixel;

          QuantumAny
            range;

          range=GetQuantumRange(depth);
          pixel=(Quantum) i;
          if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
            break;
        }
        depth_map[i]=depth;
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        register const IndexPacket
          *restrict indexes;

        register const PixelPacket
          *restrict p;

        register ssize_t
          x;

        /* status==MagickFalse signals the max depth was already reached */
        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          continue;
        indexes=GetCacheViewVirtualIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          Quantum
            pixel;

          if ((channel & RedChannel) != 0)
            {
              pixel=GetPixelRed(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if ((channel & GreenChannel) != 0)
            {
              pixel=GetPixelGreen(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if ((channel & BlueChannel) != 0)
            {
              pixel=GetPixelBlue(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            {
              pixel=GetPixelOpacity(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              pixel=GetPixelIndex(indexes+x);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          p++;
        }
        if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      depth=current_depth[0];
      for (id=1; id < (ssize_t) number_threads; id++)
        if (depth < current_depth[id])
          depth=current_depth[id];
      depth_map=(size_t *) RelinquishMagickMemory(depth_map);
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
#endif
  /*
    Fallback: per-pixel round-trip test at increasing depth (HDRI or large
    quantum builds where the depth map is not applicable).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      continue;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
      {
        MagickStatusType
          status;  /* shadows outer status: non-zero while lossy */

        QuantumAny
          range;

        status=0;
        range=GetQuantumRange(current_depth[id]);
        if ((channel & RedChannel) != 0)
          status&=GetPixelRed(p) != ScaleAnyToQuantum(
            ScaleQuantumToAny(GetPixelRed(p),range),range);
        if ((channel & GreenChannel) != 0)
          status&=GetPixelGreen(p) != ScaleAnyToQuantum(
            ScaleQuantumToAny(GetPixelGreen(p),range),range);
        if ((channel & BlueChannel) != 0)
          status&=GetPixelBlue(p) != ScaleAnyToQuantum(
            ScaleQuantumToAny(GetPixelBlue(p),range),range);
        if (((channel & OpacityChannel) != 0) &&
            (image->matte != MagickFalse))
          status&=GetPixelOpacity(p) != ScaleAnyToQuantum(
            ScaleQuantumToAny(GetPixelOpacity(p),range),range);
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          status&=GetPixelIndex(indexes+x) != ScaleAnyToQuantum(
            ScaleQuantumToAny(GetPixelIndex(indexes+x),range),range);
        if (status == 0)
          break;
        current_depth[id]++;
      }
      p++;
    }
    if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  depth=current_depth[0];
  for (id=1; id < (ssize_t) number_threads; id++)
    if (depth < current_depth[id])
      depth=current_depth[id];
  current_depth=(size_t *) RelinquishMagickMemory(current_depth);
  return(depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e Q u a n t u m D e p t h                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantumDepth() returns the depth of the image rounded to a legal
%  quantum depth: 8, 16, or 32.
%
%  The format of the GetImageQuantumDepth method is:
%
%      size_t GetImageQuantumDepth(const Image *image,
%        const MagickBooleanType constrain)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o constrain: A value other than MagickFalse, constrains the depth to
%      a maximum of MAGICKCORE_QUANTUM_DEPTH.
% */

/*
  MagickMin() returns the smaller of two doubles; used below to clamp the
  reported depth to MAGICKCORE_QUANTUM_DEPTH.
*/
static inline double MagickMin(const double x,const double y)
{
  if (x < y)
    return(x);
  return(y);
}

MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  size_t
    depth;

  /*
    Round the image depth up to the next legal quantum depth (8, 16, 32, or
    64 bits); a depth above 64 is returned unchanged.
  */
  depth=image->depth;
  if (depth <= 8)
    depth=8;
  else
    if (depth <= 16)
      depth=16;
    else
      if (depth <= 32)
        depth=32;
      else
        if (depth <= 64)
          depth=64;
  /*
    Optionally constrain the result to the quantum depth this library build
    was compiled with.
  */
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e T y p e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageType() returns the potential type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  To ensure the image type matches its potential, use SetImageType():
%
%    (void) SetImageType(image,GetImageType(image));
%
%  The format of the GetImageType method is:
%
%      ImageType GetImageType(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport ImageType GetImageType(const Image *image,ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    CMYK images are color separations; matte only selects the variant.
  */
  if (image->colorspace == CMYKColorspace)
    {
      if (image->matte == MagickFalse)
        return(ColorSeparationType);
      return(ColorSeparationMatteType);
    }
  /*
    Classify from most to least restrictive.  Note the Is*Image() predicates
    inspect every pixel and may cache the detected type/colorspace on the
    image, so the order of these tests matters.
  */
  if (IsMonochromeImage(image,exception) != MagickFalse)
    return(BilevelType);
  if (IsGrayImage(image,exception) != MagickFalse)
    {
      if (image->matte != MagickFalse)
        return(GrayscaleMatteType);
      return(GrayscaleType);
    }
  if (IsPaletteImage(image,exception) != MagickFalse)
    {
      if (image->matte != MagickFalse)
        return(PaletteMatteType);
      return(PaletteType);
    }
  if (image->matte != MagickFalse)
    return(TrueColorMatteType);
  return(TrueColorType);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s G r a y I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsGrayImage() returns MagickTrue if all the pixels in the image have the
%  same red, green, and blue intensities.
%
%  The format of the IsGrayImage method is:
%
%      MagickBooleanType IsGrayImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType IsGrayImage(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Trust a previously cached gray classification and skip the pixel scan.
  */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleMatteType))
    return(MagickTrue);
  if ((IsGrayColorspace(image->colorspace) == MagickFalse) &&
      (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
    return(MagickFalse);
  /*
    Scan every pixel: start assuming bilevel, demote to grayscale on the
    first non-monochrome gray pixel, and give up (UndefinedType) on the
    first non-gray pixel.
  */
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsGrayPixel(p) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      if ((type == BilevelType) && (IsMonochromePixel(p) == MagickFalse))
        type=GrayscaleType;
      p++;
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (type == UndefinedType)
    return(MagickFalse);
  /*
    Cache the verdict on the image.  The const is cast away deliberately:
    this is a logically-const attribute update, mirrored by a pixel-cache
    sync for the colorspace change.
  */
  ((Image *) image)->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  ((Image *) image)->type=type;
  if ((type == GrayscaleType) && (image->matte != MagickFalse))
    ((Image *) image)->type=GrayscaleMatteType;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s M o n o c h r o m e I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsMonochromeImage() returns MagickTrue if all the pixels in the image have
%  the same red, green, and blue intensities and the intensity is either
%  0 or QuantumRange.
%
%  The format of the IsMonochromeImage method is:
%
%      MagickBooleanType IsMonochromeImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsMonochromeImage(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  register ssize_t
    x;

  register const PixelPacket
    *p;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Trust a previously cached bilevel classification.
  */
  if (image->type == BilevelType)
    return(MagickTrue);
  if ((IsGrayColorspace(image->colorspace) == MagickFalse) &&
      (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
    return(MagickFalse);
  /*
    Scan every pixel; give up on the first non-monochrome pixel.
  */
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsMonochromePixel(p) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      p++;
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (type == UndefinedType)
    return(MagickFalse);
  /*
    Cache the verdict on the image (deliberate cast-away-const attribute
    update, matching IsGrayImage()).  Sync the pixel cache exactly once for
    the colorspace change; the previous code invoked SyncImagePixelCache()
    twice and, when only the second call failed, returned the stale success
    status from the first.
  */
  ((Image *) image)->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  ((Image *) image)->type=type;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s O p a q u e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsOpaqueImage() returns MagickTrue if none of the pixels in the image have
%  an opacity value other than opaque (0).
%
%  The format of the IsOpaqueImage method is:
%
%      MagickBooleanType IsOpaqueImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsOpaqueImage(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *view;

  MagickBooleanType
    opaque;

  register const PixelPacket
    *pixels;

  register ssize_t
    column;

  ssize_t
    row;

  /*
    Report MagickTrue only when every pixel carries an opaque opacity value.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->matte == MagickFalse)
    return(MagickTrue);  /* no alpha channel: trivially opaque */
  opaque=MagickTrue;
  view=AcquireVirtualCacheView(image,exception);
  for (row=0; (opaque != MagickFalse) && (row < (ssize_t) image->rows); row++)
  {
    pixels=GetCacheViewVirtualPixels(view,0,row,image->columns,1,exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        /* an unreadable row is treated as non-opaque, as before */
        opaque=MagickFalse;
        break;
      }
    for (column=0; column < (ssize_t) image->columns; column++)
    {
      if (GetPixelOpacity(pixels) != OpaqueOpacity)
        {
          opaque=MagickFalse;
          break;
        }
      pixels++;
    }
  }
  view=DestroyCacheView(view);
  return(opaque);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C h a n n e l D e p t h                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageChannelDepth() sets the depth of the image.
%
%  The format of the SetImageChannelDepth method is:
%
%      MagickBooleanType SetImageDepth(Image *image,const size_t depth)
%      MagickBooleanType SetImageChannelDepth(Image *image,
%        const ChannelType channel,const size_t depth)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o depth: the image depth.
% */
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth)
{
  /* Convenience wrapper: set the depth of every channel. */
  return(SetImageChannelDepth(image,CompositeChannels,depth));
}

MagickExport MagickBooleanType SetImageChannelDepth(Image *image,
  const ChannelType channel,const size_t depth)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  /*
    A requested depth at or above the build's quantum depth requires no
    pixel requantization; just record it.
  */
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      image->depth=depth;
      return(MagickTrue);
    }
  range=GetQuantumRange(depth);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Requantize the colormap of a palette image to the reduced depth.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=ScaleAnyToQuantum(ScaleQuantumToAny(
            image->colormap[i].red,range),range);
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=ScaleAnyToQuantum(ScaleQuantumToAny(
            image->colormap[i].green,range),range);
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=ScaleAnyToQuantum(ScaleQuantumToAny(
            image->colormap[i].blue,range),range);
        if ((channel & OpacityChannel) != 0)
          image->colormap[i].opacity=ScaleAnyToQuantum(ScaleQuantumToAny(
            image->colormap[i].opacity,range),range);
      }
    }
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if (QuantumRange <= MaxMap)
    {
      Quantum
        *depth_map;

      register ssize_t
        i;

      /*
        Scale pixels to desired (optimized with depth map): precompute the
        requantized value for every possible quantum so the per-pixel work
        is a single table lookup.
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        if (status == MagickFalse)
          continue;  /* another thread failed; skip remaining rows */
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,depth_map[ScaleQuantumToMap(GetPixelRed(q))]);
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,depth_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,depth_map[ScaleQuantumToMap(GetPixelBlue(q))]);
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            SetPixelOpacity(q,depth_map[ScaleQuantumToMap(
              GetPixelOpacity(q))]);
          q++;
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth (general path: requantize each sample
    directly, no lookup table).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelRed(q),
          range),range));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelGreen(q),
          range),range));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelBlue(q),
          range),range));
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        SetPixelOpacity(q,ScaleAnyToQuantum(ScaleQuantumToAny(
          GetPixelOpacity(q),range),range));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e T y p e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageType() sets the type of image.  Choose from these types:
%
%        BilevelType, GrayscaleType, GrayscaleMatteType, PaletteType,
%        PaletteMatteType, TrueColorType, TrueColorMatteType,
%        ColorSeparationType, ColorSeparationMatteType, OptimizeType
%
%  The format of the SetImageType method is:
%
%      MagickBooleanType SetImageType(Image *image,const ImageType type)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: Image type.
% */
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type)
{
  const char
    *artifact;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  status=MagickTrue;
  /*
    Honor a per-image "dither" artifact for the quantize steps below.
  */
  image_info=AcquireImageInfo();
  image_info->dither=image->dither;
  artifact=GetImageArtifact(image,"dither");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"dither",artifact);
  switch (type)
  {
    case BilevelType:
    {
      /* gray colorspace, then quantize down to 2 colors if needed */
      if (IsGrayImage(image,&image->exception) == MagickFalse)
        status=TransformImageColorspace(image,GRAYColorspace);
      if (IsMonochromeImage(image,&image->exception) == MagickFalse)
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=2;
          quantize_info->colorspace=GRAYColorspace;
          status=QuantizeImage(quantize_info,image);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->matte=MagickFalse;
      break;
    }
    case GrayscaleType:
    {
      if (IsGrayImage(image,&image->exception) == MagickFalse)
        status=TransformImageColorspace(image,GRAYColorspace);
      image->matte=MagickFalse;
      break;
    }
    case GrayscaleMatteType:
    {
      if (IsGrayImage(image,&image->exception) == MagickFalse)
        status=TransformImageColorspace(image,GRAYColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case PaletteType:
    {
      /* sRGB, then quantize to at most 256 colors if not already indexed */
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=256;
          status=QuantizeImage(quantize_info,image);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->matte=MagickFalse;
      break;
    }
    case PaletteBilevelMatteType:
    {
      /* threshold the alpha channel to two levels before quantizing */
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      (void) BilevelImageChannel(image,AlphaChannel,(double) QuantumRange/2.0);
      quantize_info=AcquireQuantizeInfo(image_info);
      status=QuantizeImage(quantize_info,image);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteMatteType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->colorspace=TransparentColorspace;
      status=QuantizeImage(quantize_info,image);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      image->matte=MagickFalse;
      break;
    }
    case TrueColorMatteType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case ColorSeparationType:
    {
      /* route through sRGB when the colorspace is not CMYK-convertible */
      if (image->colorspace != CMYKColorspace)
        {
          if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
            status=TransformImageColorspace(image,sRGBColorspace);
          status=TransformImageColorspace(image,CMYKColorspace);
        }
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      image->matte=MagickFalse;
      break;
    }
    case ColorSeparationMatteType:
    {
      if (image->colorspace != CMYKColorspace)
        {
          if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
            status=TransformImageColorspace(image,sRGBColorspace);
          status=TransformImageColorspace(image,CMYKColorspace);
        }
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;  /* nothing to do for these types */
  }
  image_info=DestroyImageInfo(image_info);
  if (status == MagickFalse)
    return(MagickFalse);
  image->type=type;
  return(MagickTrue);
}
GB_subref_template.c
//------------------------------------------------------------------------------ // GB_subref_template: C = A(I,J), or C = pattern (A(I,J)) //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ #if defined ( GB_SYMBOLIC ) // symbolic method must tolerate zombies #define GB_Ai(p) GB_UNFLIP (Ai [p]) #else // numeric method will not see any zombies #define GB_Ai(p) Ai [p] #endif // to iterate across all entries in a bucket: #define GB_for_each_index_in_bucket(inew,i) \ for (int64_t inew = Mark[i]-1 ; inew >= 0 ; inew = Inext [inew]) // copy values from A(:,kA) to C(:,kC): Cx [pC:pC+len-1] = Ax [pA:pA+len-1]. #if defined ( GB_SYMBOLIC ) #define GB_COPY_RANGE(pC,pA,len) \ for (int64_t k = 0 ; k < (len) ; k++) \ { \ Cx [(pC) + k] = (pA) + k ; \ } #else #define GB_COPY_RANGE(pC,pA,len) \ memcpy (Cx + (pC)*GB_CSIZE1, Ax + (pA)*GB_CSIZE1, (len) * GB_CSIZE2) ; #endif // copy a single value from A(:,kA) to C(:,kC): Cx [pC] = Ax [pA]. #if defined ( GB_SYMBOLIC ) #define GB_COPY_ENTRY(pC,pA) \ Cx [pC] = (pA) ; #else #define GB_COPY_ENTRY(pC,pA) \ /* Cx [pC] = Ax [pA] */ \ memcpy (Cx + (pC)*GB_CSIZE1, Ax + (pA)*GB_CSIZE1, GB_CSIZE2) ; #endif // the type of Cx #if defined ( GB_SYMBOLIC ) #define GB_CTYPE int64_t #define GB_CSIZE1 1 #define GB_CSIZE2 (sizeof (int64_t)) #else #define GB_CTYPE GB_void #define GB_CSIZE1 asize #define GB_CSIZE2 asize // FUTURE: If built-in types are used instead of generic, then GB_COPY_ENTRY // can become Cx [pC] = Ax [pA]. However, the generic GB_qsort_1b would also // need to be replaced with type-specific versions for each built-in type. 
For // A and C of type double, the #defines would be: // #define GB_CTYPE double // #define GB_CSIZE1 1 // #define GB_CSIZE2 (sizeof (double)) #endif { //-------------------------------------------------------------------------- // get A //-------------------------------------------------------------------------- const int64_t *GB_RESTRICT Ai = A->i ; const int64_t avlen = A->vlen ; #if defined ( GB_SYMBOLIC ) const int64_t nzombies = A->nzombies ; #endif #if defined ( GB_PHASE_2_OF_2 ) && defined ( GB_NUMERIC ) const GB_CTYPE *GB_RESTRICT Ax = A->x ; const int64_t asize = A->type->size ; #endif //-------------------------------------------------------------------------- // get C //-------------------------------------------------------------------------- #if defined ( GB_PHASE_2_OF_2 ) int64_t *GB_RESTRICT Ci = C->i ; GB_CTYPE *GB_RESTRICT Cx = C->x ; #endif //-------------------------------------------------------------------------- // get I //-------------------------------------------------------------------------- // these values are ignored if Ikind == GB_LIST int64_t ibegin = Icolon [GxB_BEGIN] ; int64_t iinc = Icolon [GxB_INC ] ; int64_t inc = (iinc < 0) ? 
(-iinc) : iinc ; #ifdef GB_DEBUG int64_t iend = Icolon [GxB_END ] ; #endif //-------------------------------------------------------------------------- // phase1: count entries in each C(:,kC); phase2: compute C //-------------------------------------------------------------------------- int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (taskid = 0 ; taskid < ntasks ; taskid++) { //---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- int64_t kfirst = TaskList [taskid].kfirst ; int64_t klast = TaskList [taskid].klast ; bool fine_task = (klast < 0) ; if (fine_task) { // a fine task operates on a slice of a single vector klast = kfirst ; } // a coarse task accesses all of I for all its vectors int64_t pI = 0 ; int64_t pI_end = nI ; int64_t ilen = nI ; ASSERT (0 <= kfirst && kfirst <= klast && klast < Cnvec) ; //---------------------------------------------------------------------- // compute all vectors C(:,kfirst:klast) for this task //---------------------------------------------------------------------- for (int64_t kC = kfirst ; kC <= klast ; kC++) { //------------------------------------------------------------------ // get C(:,kC) //------------------------------------------------------------------ #if defined ( GB_PHASE_1_OF_2 ) // phase1 simply counts the # of entries in C(*,kC). int64_t clen = 0 ; #else // This task computes all or part of C(:,kC), which are the entries // in Ci,Cx [pC:pC_end-1]. int64_t pC, pC_end ; if (fine_task) { // A fine task computes a slice of C(:,kC) pC = TaskList [taskid ].pC ; pC_end = TaskList [taskid+1].pC ; ASSERT (Cp [kC] <= pC && pC <= pC_end && pC_end <= Cp [kC+1]) ; } else { // The vectors of C are never sliced for a coarse task, so this // task computes all of C(:,kC). 
pC = Cp [kC] ; pC_end = Cp [kC+1] ; } int64_t clen = pC_end - pC ; if (clen == 0) continue ; #endif //------------------------------------------------------------------ // get A(:,kA) //------------------------------------------------------------------ int64_t pA, pA_end ; if (fine_task) { // a fine task computes a slice of a single vector C(:,kC). // The task accesses Ai,Ax [pA:pA_end-1], which holds either // the entire vector A(imin:imax,kA) for method 6, the entire // dense A(:,kA) for methods 1 and 2, or a slice of the // A(imin:max,kA) vector for all other methods. pA = TaskList [taskid].pA ; pA_end = TaskList [taskid].pA_end ; } else { // a coarse task computes the entire vector C(:,kC). The task // accesses all of A(imin:imax,kA), for most methods, or all of // A(:,kA) for methods 1 and 2. The vector A(*,kA) appears in // Ai,Ax [pA:pA_end-1]. pA = Ap_start [kC] ; pA_end = Ap_end [kC] ; } int64_t alen = pA_end - pA ; if (alen == 0) continue ; //------------------------------------------------------------------ // get I //------------------------------------------------------------------ if (fine_task) { // A fine task accesses I [pI:pI_end-1]. For methods 2 and 6, // pI:pI_end is a subset of the entire 0:nI-1 list. For all // other methods, pI = 0 and pI_end = nI, and the task can // access all of I. pI = TaskList [taskid].pB ; pI_end = TaskList [taskid].pB_end ; ilen = pI_end - pI ; } //------------------------------------------------------------------ // determine the method to use //------------------------------------------------------------------ int method ; if (fine_task) { // The method that the fine task uses for its slice of A(*,kA) // and C(*,kC) has already been determined by GB_subref_slice. 
method = (int) (-TaskList [taskid].klast) ; } else { // determine the method based on A(*,kA) and I method = GB_subref_method (NULL, NULL, alen, avlen, Ikind, nI, (Mark != NULL), need_qsort, iinc, nduplicates) ; } //------------------------------------------------------------------ // extract C (:,kC) = A (I,kA): consider all cases //------------------------------------------------------------------ switch (method) { //-------------------------------------------------------------- case 1 : // C(:,kC) = A(:,kA) where A(:,kA) is dense //-------------------------------------------------------------- // A (:,kA) has not been sliced ASSERT (Ikind == GB_ALL) ; ASSERT (pA == Ap_start [kC]) ; ASSERT (pA_end == Ap_end [kC]) ; // copy the entire vector and construct indices #if defined ( GB_PHASE_1_OF_2 ) clen = ilen ; #else for (int64_t k = 0 ; k < ilen ; k++) { int64_t inew = k + pI ; ASSERT (inew == GB_ijlist (I, inew, Ikind, Icolon)) ; ASSERT (inew == GB_Ai (pA + inew)) ; Ci [pC + k] = inew ; } GB_COPY_RANGE (pC, pA + pI, ilen) ; #endif break ; //-------------------------------------------------------------- case 2 : // C(:,kC) = A(I,kA) where A(I,kA) is dense //-------------------------------------------------------------- // This method handles any kind of list I, but A(:,kA) // must be dense. A(:,kA) has not been sliced. ASSERT (pA == Ap_start [kC]) ; ASSERT (pA_end == Ap_end [kC]) ; // scan I and get the entry in A(:,kA) via direct lookup #if defined ( GB_PHASE_1_OF_2 ) clen = ilen ; #else for (int64_t k = 0 ; k < ilen ; k++) { // C(inew,kC) = A(i,kA), and it always exists. 
int64_t inew = k + pI ; int64_t i = GB_ijlist (I, inew, Ikind, Icolon) ; ASSERT (i == GB_Ai (pA + i)) ; Ci [pC + k] = inew ; GB_COPY_ENTRY (pC + k, pA + i) ; } #endif break ; //-------------------------------------------------------------- case 3 : // the list I has a single index, ibegin //-------------------------------------------------------------- // binary search in GB_subref_phase0 has already found it. // This can be any Ikind with nI=1: GB_ALL with A->vlen=1, // GB_RANGE with ibegin==iend, GB_STRIDE such as 0:-1:0 // (with length 1), or a GB_LIST with ni=1. // Time: 50x faster than MATLAB ASSERT (!fine_task) ; ASSERT (alen == 1) ; ASSERT (nI == 1) ; ASSERT (GB_Ai (pA) == GB_ijlist (I, 0, Ikind, Icolon)) ; #if defined ( GB_PHASE_1_OF_2 ) clen = 1 ; #else Ci [pC] = 0 ; GB_COPY_ENTRY (pC, pA) ; #endif break ; //-------------------------------------------------------------- case 4 : // Ikind is ":", thus C(:,kC) = A (:,kA) //-------------------------------------------------------------- // Time: 1x MATLAB but low speedup on the Mac. Why? // Probably memory bound since it is just memcpy's. ASSERT (Ikind == GB_ALL && ibegin == 0) ; #if defined ( GB_PHASE_1_OF_2 ) clen = alen ; #else #if defined ( GB_SYMBOLIC ) if (nzombies == 0) { memcpy (Ci + pC, Ai + pA, alen * sizeof (int64_t)) ; } else { // with zombies for (int64_t k = 0 ; k < alen ; k++) { int64_t i = GB_Ai (pA + k) ; ASSERT (i == GB_ijlist (I, i, Ikind, Icolon)) ; Ci [pC + k] = i ; } } #else memcpy (Ci + pC, Ai + pA, alen * sizeof (int64_t)) ; #endif GB_COPY_RANGE (pC, pA, alen) ; #endif break ; //-------------------------------------------------------------- case 5 : // Ikind is GB_RANGE = ibegin:iend //-------------------------------------------------------------- // Time: much faster than MATLAB. Good speedup too. 
ASSERT (Ikind == GB_RANGE) ; #if defined ( GB_PHASE_1_OF_2 ) clen = alen ; #else for (int64_t k = 0 ; k < alen ; k++) { int64_t i = GB_Ai (pA + k) ; int64_t inew = i - ibegin ; ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ; Ci [pC + k] = inew ; } GB_COPY_RANGE (pC, pA, alen) ; #endif break ; //-------------------------------------------------------------- case 6 : // I is short vs nnz (A (:,kA)), use binary search //-------------------------------------------------------------- // Time: very slow unless I is very short and A(:,kA) is // very long. // This case can handle any kind of I, and A(:,kA) of any // properties. For a fine task, A(:,kA) has not been // sliced; I has been sliced instead. // If the I bucket inverse has not been created, this // method is the only option. Alternatively, if nI = // length (I) is << nnz (A (:,kA)), then scanning I and // doing a binary search of A (:,kA) is faster than doing a // linear-time search of A(:,kA) and a lookup into the I // bucket inverse. // The vector of C is constructed in sorted order, so no // sort is needed. // A(:,kA) has not been sliced. ASSERT (pA == Ap_start [kC]) ; ASSERT (pA_end == Ap_end [kC]) ; // scan I, in order, and search for the entry in A(:,kA) for (int64_t k = 0 ; k < ilen ; k++) { // C(inew,kC) = A (i,kA), if it exists. 
// i = I [inew] ; or from a colon expression int64_t inew = k + pI ; int64_t i = GB_ijlist (I, inew, Ikind, Icolon) ; bool found ; int64_t pleft = pA ; int64_t pright = pA_end - 1 ; #if defined ( GB_SYMBOLIC ) bool is_zombie ; GB_BINARY_SEARCH_ZOMBIE (i, Ai, pleft, pright, found, nzombies, is_zombie) ; #else GB_BINARY_SEARCH (i, Ai, pleft, pright, found) ; #endif if (found) { ASSERT (i == GB_Ai (pleft)) ; #if defined ( GB_PHASE_1_OF_2 ) clen++ ; #else ASSERT (pC < pC_end) ; Ci [pC] = inew ; GB_COPY_ENTRY (pC, pleft) ; pC++ ; #endif } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif break ; //-------------------------------------------------------------- case 7 : // I is ibegin:iinc:iend with iinc > 1 //-------------------------------------------------------------- // Time: 1 thread: C=A(1:2:n,:) is 3x slower than MATLAB // but has good speedup. About as fast as MATLAB with // enough threads. ASSERT (Ikind == GB_STRIDE && iinc > 1) ; for (int64_t k = 0 ; k < alen ; k++) { // A(i,kA) present; see if it is in ibegin:iinc:iend int64_t i = GB_Ai (pA + k) ; ASSERT (ibegin <= i && i <= iend) ; i = i - ibegin ; if (i % iinc == 0) { // i is in the sequence ibegin:iinc:iend #if defined ( GB_PHASE_1_OF_2 ) clen++ ; #else int64_t inew = i / iinc ; ASSERT (pC < pC_end) ; Ci [pC] = inew ; GB_COPY_ENTRY (pC, pA + k) ; pC++ ; #endif } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif break ; //---------------------------------------------------------- case 8 : // I = ibegin:(-iinc):iend, with iinc < -1 //---------------------------------------------------------- // Time: 2x slower than MATLAB for iinc = -2 or -8. // Good speedup though. Faster than MATLAB for // large values (iinc = -128). 
ASSERT (Ikind == GB_STRIDE && iinc < -1) ; for (int64_t k = alen - 1 ; k >= 0 ; k--) { // A(i,kA) present; see if it is in ibegin:iinc:iend int64_t i = GB_Ai (pA + k) ; ASSERT (iend <= i && i <= ibegin) ; i = ibegin - i ; if (i % inc == 0) { // i is in the sequence ibegin:iinc:iend #if defined ( GB_PHASE_1_OF_2 ) clen++ ; #else int64_t inew = i / inc ; ASSERT (pC < pC_end) ; Ci [pC] = inew ; GB_COPY_ENTRY (pC, pA + k) ; pC++ ; #endif } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif break ; //---------------------------------------------------------- case 9 : // I = ibegin:(-1):iend //---------------------------------------------------------- // Time: much faster than MATLAB. Good speedup. ASSERT (Ikind == GB_STRIDE && iinc == -1) ; #if defined ( GB_PHASE_1_OF_2 ) clen = alen ; #else for (int64_t k = alen - 1 ; k >= 0 ; k--) { // A(i,kA) is present int64_t i = GB_Ai (pA + k) ; int64_t inew = (ibegin - i) ; ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ; Ci [pC] = inew ; GB_COPY_ENTRY (pC, pA + k) ; pC++ ; } #endif break ; //-------------------------------------------------------------- case 10 : // I unsorted, and C needs qsort, duplicates OK //-------------------------------------------------------------- // Time: with one thread: 2x slower than MATLAB, probably // because of the qsort. Good speedup however. This used // if qsort is needed but ndupl == 0. Try a method that // needs qsort, but no duplicates? // Case 10 works well when I has many entries and A(:,kA) // has few entries. C(:,kC) must be sorted after this pass. 
ASSERT (Ikind == GB_LIST) ; for (int64_t k = 0 ; k < alen ; k++) { // A(i,kA) present, look it up in the I inverse buckets int64_t i = GB_Ai (pA + k) ; // traverse bucket i for all indices inew where // i == I [inew] or where i is from a colon expression GB_for_each_index_in_bucket (inew, i) { ASSERT (inew >= 0 && inew < nI) ; ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ; #if defined ( GB_PHASE_1_OF_2 ) clen++ ; #else Ci [pC] = inew ; GB_COPY_ENTRY (pC, pA + k) ; pC++ ; #endif } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; if (!fine_task) { // a coarse task owns this entire C(:,kC) vector, so // the sort can be done now. The sort for vectors // handled by multiple fine tasks must wait until all // task are completed, below in the post sort. pC = Cp [kC] ; GB_qsort_1b (Ci + pC, (GB_void *) (Cx + pC*GB_CSIZE1), GB_CSIZE2, clen) ; } #endif break ; //-------------------------------------------------------------- case 11 : // I not contiguous, with duplicates. No qsort needed //-------------------------------------------------------------- // Case 11 works well when I has many entries and A(:,kA) // has few entries. It requires that I be sorted on input, // so that no sort is required for C(:,kC). It is // otherwise identical to Case 9. ASSERT (Ikind == GB_LIST) ; for (int64_t k = 0 ; k < alen ; k++) { // A(i,kA) present, look it up in the I inverse buckets int64_t i = GB_Ai (pA + k) ; // traverse bucket i for all indices inew where // i == I [inew] or where i is from a colon expression GB_for_each_index_in_bucket (inew, i) { ASSERT (inew >= 0 && inew < nI) ; ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ; #if defined ( GB_PHASE_1_OF_2 ) clen++ ; #else Ci [pC] = inew ; GB_COPY_ENTRY (pC, pA + k) ; pC++ ; #endif } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif break ; //-------------------------------------------------------------- case 12 : // I not contiguous, no duplicates. No qsort needed. 
//-------------------------------------------------------------- // Identical to Case 11, except GB_for_each_index_in_bucket // just needs to iterate 0 or 1 times. Works well when I // has many entries and A(:,kA) has few entries. ASSERT (Ikind == GB_LIST && nduplicates == 0) ; for (int64_t k = 0 ; k < alen ; k++) { // A(i,kA) present, look it up in the I inverse buckets int64_t i = GB_Ai (pA + k) ; // bucket i has at most one index inew such that // i == I [inew] int64_t inew = Mark [i] - 1 ; if (inew >= 0) { ASSERT (inew >= 0 && inew < nI) ; ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ; #if defined ( GB_PHASE_1_OF_2 ) clen++ ; #else Ci [pC] = inew ; GB_COPY_ENTRY (pC, pA + k) ; pC++ ; #endif } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif break ; //-------------------------------------------------------------- default:; //-------------------------------------------------------------- } //------------------------------------------------------------------ // final count of nnz (C (:,j)) //------------------------------------------------------------------ #if defined ( GB_PHASE_1_OF_2 ) if (fine_task) { TaskList [taskid].pC = clen ; } else { Cp [kC] = clen ; } #endif } } //-------------------------------------------------------------------------- // phase2: post sort for any vectors handled by fine tasks with method 10 //-------------------------------------------------------------------------- #if defined ( GB_PHASE_2_OF_2 ) if (post_sort) { int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (taskid = 0 ; taskid < ntasks ; taskid++) { int64_t kC = TaskList [taskid].kfirst ; bool do_post_sort = (TaskList [taskid].len != 0) ; if (do_post_sort) { // This is the first fine task with method 10 for C(:,kC). The // vector C(:,kC) must be sorted, since method 10 left it with // unsorted indices. 
int64_t pC = Cp [kC] ; int64_t clen = Cp [kC+1] - pC ; GB_qsort_1b (Ci + pC, (GB_void *) (Cx + pC*GB_CSIZE1), GB_CSIZE2, clen) ; } } } #endif } #undef GB_Ai #undef GB_for_each_index_in_bucket #undef GB_COPY_RANGE #undef GB_COPY_ENTRY #undef GB_CTYPE #undef GB_CSIZE1 #undef GB_CSIZE2
matmul_cpu_omp_kernel.c
#include <homp.h>
#include "matmul.h"
#ifdef USE_INTEL_MKL
#include <mkl.h>
#endif

/*
 * Compute C = A * B on the host CPU for the offloading runtime.
 *
 * A is i-by-k, B is k-by-j, C is i-by-j, all dense row-major float arrays.
 * The thread count is taken from the offloading descriptor's device
 * (off->dev->num_cores).
 *
 * With USE_INTEL_MKL the multiply is delegated to MKL's cblas_sgemm;
 * otherwise a naive OpenMP-parallel triple loop is used.
 */
void matmul_cpu_omp_wrapper(omp_offloading_t *off, long i, long j, long k,
                            float *A, float *B, float *C)
{
    int num_omp_threads = off->dev->num_cores;

#ifdef USE_INTEL_MKL
    /* Keep the computation on the host CPU; do not auto-offload to MIC. */
    mkl_mic_disable();
    REAL alpha = 1;
    REAL beta = 0;
    /* BUG FIX: the original wrapped this call in "#pragma omp parallel",
     * which made every thread execute the *entire* sgemm redundantly and
     * race on the writes to C.  MKL's sgemm is internally threaded, so
     * request the thread count from MKL and call it exactly once. */
    mkl_set_num_threads(num_omp_threads);
    cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, i, j, k,
                alpha, A, k, B, j, beta, C, j);
#else
    /* Naive row-major triple loop; rows of C are independent, so the
     * outer loop parallelizes without any synchronization. */
    long ii, jj, kk;
#pragma omp parallel for shared(A, B, C, i, j, k) private(ii, jj, kk) num_threads(num_omp_threads)
    for (ii = 0; ii < i; ii++) {
        for (jj = 0; jj < j; jj++) {
            REAL sum = 0.0;
            for (kk = 0; kk < k; kk++) {
                sum += A[ii * k + kk] * B[kk * j + jj];
            }
            C[ii * j + jj] = sum;
        }
    }
#endif
}
InternalTransferCircuit.h
#ifndef _INTERNAL_TRANSFER_CIRCUIT_H_
#define _INTERNAL_TRANSFER_CIRCUIT_H_

#include "Circuit.h"
#include "../Utils/Constants.h"
#include "../Utils/Data.h"
#include "../Utils/Utils.h"
#include "../Gadgets/AccountGadgets.h"
#include "../Gadgets/TradingHistoryGadgets.h"

#include "ethsnarks.hpp"
#include "utils.hpp"

using namespace ethsnarks;

namespace Loopring
{

// Circuit gadget for a single internal transfer:
//   - 'amount' of 'tokenID' moves from account 'accountID_From' to
//     'accountID_To';
//   - a 'fee' in 'feeTokenID' moves from the From account to the operator;
//   - the From account's nonce is incremented (for signed transfers);
//   - the account and balance Merkle sub-trees are updated accordingly.
// A transfer may be "conditional" (type != 0): then the EdDSA signature is
// not required to verify and the running conditional-transfer counter is
// incremented instead.
class InternalTransferGadget : public GadgetT
{
public:
    const Constants& constants;

    // User From state (before this transfer is applied)
    BalanceGadget balanceFBefore_From;  // From's balance in the fee token
    BalanceGadget balanceTBefore_From;  // From's balance in the transfer token
    AccountGadget accountBefore_From;
    // User To state
    BalanceGadget balanceTBefore_To;    // To's balance in the transfer token
    AccountGadget accountBefore_To;
    // Operator state
    BalanceGadget balanceBefore_O;      // operator's balance in the fee token

    // Inputs (each both packed and bit-decomposed)
    DualVariableGadget accountID_From;
    DualVariableGadget accountID_To;
    DualVariableGadget tokenID;
    DualVariableGadget amount;
    DualVariableGadget feeTokenID;
    DualVariableGadget fee;
    DualVariableGadget type;

    // Signature
    Poseidon_gadget_T<9, 1, 6, 53, 8, 1> hash;
    SignatureVerifier signatureVerifier;

    // Type
    NotGadget signatureInvalid;
    UnsafeAddGadget numConditionalTransfersAfter;
    RequireEqualGadget type_eq_signatureInvalid;

    // User To account check
    RequireNotZeroGadget publicKeyX_notZero;

    // Fee as float
    FloatGadget fFee;
    RequireAccuracyGadget requireAccuracyFee;
    // Amount as float
    FloatGadget fAmount;
    RequireAccuracyGadget requireAccuracyAmount;

    // Fee payment from From to the operator
    subadd_gadget feePayment;
    // Transfer from From to To
    subadd_gadget transferPayment;

    // Increase the nonce of From by 1
    AddGadget nonce_From_after;

    // Update User From
    UpdateBalanceGadget updateBalanceF_From;
    UpdateBalanceGadget updateBalanceT_From;
    UpdateAccountGadget updateAccount_From;
    // Update User To
    UpdateBalanceGadget updateBalanceT_To;
    UpdateAccountGadget updateAccount_To;
    // Update Operator
    UpdateBalanceGadget updateBalanceF_O;

    // accountsMerkleRoot / operatorBalancesRoot are the roots BEFORE this
    // transfer; the "getNew*" accessors below expose the roots AFTER it,
    // so consecutive transfer gadgets can be chained.
    InternalTransferGadget(
        ProtoboardT &pb,
        const jubjub::Params& params,
        const Constants& _constants,
        const VariableT& accountsMerkleRoot,
        const VariableT& operatorBalancesRoot,
        const VariableT& blockExchangeID,
        const VariableT& numConditionalTransfersBefore,
        const std::string &prefix
    ) :
        GadgetT(pb, prefix),
        constants(_constants),

        // User From state
        balanceFBefore_From(pb, FMT(prefix, "balanceFBefore_From")),
        balanceTBefore_From(pb, FMT(prefix, "balanceTBefore_From")),
        accountBefore_From(pb, FMT(prefix, "accountBefore_From")),
        // User To state
        balanceTBefore_To(pb, FMT(prefix, "balanceTBefore_To")),
        accountBefore_To(pb, FMT(prefix, "accountBefore_To")),
        // Operator state
        balanceBefore_O(pb, FMT(prefix, "balanceBefore_O")),

        // Inputs
        accountID_From(pb, NUM_BITS_ACCOUNT, FMT(prefix, ".accountID_From")),
        accountID_To(pb, NUM_BITS_ACCOUNT, FMT(prefix, ".accountID_To")),
        tokenID(pb, NUM_BITS_TOKEN, FMT(prefix, ".tokenID")),
        amount(pb, NUM_BITS_AMOUNT, FMT(prefix, ".amount")),
        feeTokenID(pb, NUM_BITS_TOKEN, FMT(prefix, ".feeTokenID")),
        fee(pb, NUM_BITS_AMOUNT, FMT(prefix, ".fee")),
        type(pb, NUM_BITS_TYPE, FMT(prefix, ".type")),

        // Signature
        // First verify the From side's signature over the transfer data
        // (bound to the exchange ID and From's current nonce for replay
        // protection).
        hash(pb, var_array({blockExchangeID, accountID_From.packed, accountID_To.packed, tokenID.packed, amount.packed, feeTokenID.packed, fee.packed, accountBefore_From.nonce}), FMT(this->annotation_prefix, ".hash")),
        signatureVerifier(pb, params, constants, accountBefore_From.publicKey, hash.result(), FMT(prefix, ".signatureVerifier"), false),

        // Type
        signatureInvalid(pb, signatureVerifier.result(), ".signatureInvalid"),
        // numConditionalTransfersAfter accumulates the number of transfers
        // without a valid signature (the conditional transfers).
        numConditionalTransfersAfter(pb, numConditionalTransfersBefore, signatureInvalid.result(), ".numConditionalTransfersAfter"),
        // The transfer 'type' is constrained here: it must equal the
        // inverted signature-verification result (type 1 <=> no valid
        // signature, i.e. a conditional transfer).
        type_eq_signatureInvalid(pb, type.packed, signatureInvalid.result(), ".type_eq_signatureInvalid"),

        // User To account check
        // Make sure the To account exists (its public key X is non-zero).
        publicKeyX_notZero(pb, accountBefore_To.publicKey.x, FMT(prefix, ".publicKeyX_notZero")),

        // Fee as float
        // Decode the fee from its float encoding and constrain how much
        // accuracy may be lost relative to the full-precision 'fee' input.
        fFee(pb, constants, Float16Encoding, FMT(prefix, ".fFee")),
        requireAccuracyFee(pb, fFee.value(), fee.packed, Float16Accuracy, NUM_BITS_AMOUNT, FMT(prefix, ".requireAccuracyFee")),
        // Amount as float
        // Decode the transfer amount and constrain its accuracy likewise.
        fAmount(pb, constants, Float24Encoding, FMT(prefix, ".fTansAmount")),
        requireAccuracyAmount(pb, fAmount.value(), amount.packed, Float24Accuracy, NUM_BITS_AMOUNT, FMT(prefix, ".requireAccuracyAmount")),

        // Fee payment from From to the operator
        // (From pays the fee: X = From balance after, Y = operator after.)
        feePayment(pb, NUM_BITS_AMOUNT, balanceFBefore_From.balance, balanceBefore_O.balance, fFee.value(), FMT(prefix, ".feePayment")),
        // Transfer from From to To
        // (X = From balance after, Y = To balance after.)
        transferPayment(pb, NUM_BITS_AMOUNT, balanceTBefore_From.balance, balanceTBefore_To.balance, fAmount.value(), FMT(prefix, ".transferPayment")),

        // Increase the nonce of From by 1 (unless it's a conditional
        // transfer: the increment is the signature-verification result).
        nonce_From_after(pb, accountBefore_From.nonce, signatureVerifier.result(), NUM_BITS_NONCE, FMT(prefix, ".nonce_From_after")),

        // Update User From: fee-token leaf, then transfer-token leaf (the
        // second update starts from the first one's new root), then the
        // account leaf with the new nonce and balances root.
        updateBalanceF_From(pb, accountBefore_From.balancesRoot, feeTokenID.bits,
                            {balanceFBefore_From.balance, balanceFBefore_From.tradingHistory},
                            {feePayment.X, balanceFBefore_From.tradingHistory},
                            FMT(prefix, ".updateBalanceF_From")),
        updateBalanceT_From(pb, updateBalanceF_From.result(), tokenID.bits,
                            {balanceTBefore_From.balance, balanceTBefore_From.tradingHistory},
                            {transferPayment.X, balanceTBefore_From.tradingHistory},
                            FMT(prefix, ".updateBalanceT_From")),
        updateAccount_From(pb, accountsMerkleRoot, accountID_From.bits,
                           {accountBefore_From.publicKey.x, accountBefore_From.publicKey.y, accountBefore_From.nonce, accountBefore_From.balancesRoot},
                           {accountBefore_From.publicKey.x, accountBefore_From.publicKey.y, nonce_From_after.result(), updateBalanceT_From.result()},
                           FMT(prefix, ".updateAccount_From")),

        // Update User To (account update chains off the From update).
        updateBalanceT_To(pb, accountBefore_To.balancesRoot, tokenID.bits,
                          {balanceTBefore_To.balance, balanceTBefore_To.tradingHistory},
                          {transferPayment.Y, balanceTBefore_To.tradingHistory},
                          FMT(prefix, ".updateBalanceT_To")),
        updateAccount_To(pb, updateAccount_From.result(), accountID_To.bits,
                         {accountBefore_To.publicKey.x, accountBefore_To.publicKey.y, accountBefore_To.nonce, accountBefore_To.balancesRoot},
                         {accountBefore_To.publicKey.x, accountBefore_To.publicKey.y, accountBefore_To.nonce, updateBalanceT_To.result()},
                         FMT(prefix, ".updateAccount_To")),

        // Update Operator
        // Update the operator's balances root with the received fee.
        updateBalanceF_O(pb, operatorBalancesRoot, feeTokenID.bits,
                         {balanceBefore_O.balance, balanceBefore_O.tradingHistory},
                         {feePayment.Y, balanceBefore_O.tradingHistory},
                         FMT(prefix, ".updateBalanceF_O"))
    {
    }

    // Fill in the witness for one transfer (mirrors the constructor's
    // wiring order).
    void generate_r1cs_witness(const InternalTransfer& transfer)
    {
        // User From state
        balanceFBefore_From.generate_r1cs_witness(transfer.balanceUpdateF_From.before);
        balanceTBefore_From.generate_r1cs_witness(transfer.balanceUpdateT_From.before);
        accountBefore_From.generate_r1cs_witness(transfer.accountUpdate_From.before);
        // User To state
        balanceTBefore_To.generate_r1cs_witness(transfer.balanceUpdateT_To.before);
        accountBefore_To.generate_r1cs_witness(transfer.accountUpdate_To.before);
        // Operator state
        balanceBefore_O.generate_r1cs_witness(transfer.balanceUpdateF_O.before);

        // Inputs
        accountID_From.generate_r1cs_witness(pb, transfer.accountUpdate_From.accountID);
        accountID_To.generate_r1cs_witness(pb, transfer.accountUpdate_To.accountID);
        tokenID.generate_r1cs_witness(pb, transfer.balanceUpdateT_From.tokenID);
        amount.generate_r1cs_witness(pb, transfer.amount);
        feeTokenID.generate_r1cs_witness(pb, transfer.balanceUpdateF_From.tokenID);
        fee.generate_r1cs_witness(pb, transfer.fee);
        type.generate_r1cs_witness(pb, transfer.type);

        // Signature
        hash.generate_r1cs_witness();
        signatureVerifier.generate_r1cs_witness(transfer.signature);

        // Type
        signatureInvalid.generate_r1cs_witness();
        // The running counter value is supplied directly by the prover
        // input (its correctness is enforced by the UnsafeAddGadget's
        // constraint).
        pb.val(numConditionalTransfersAfter.sum) = transfer.numConditionalTransfersAfter;
        type_eq_signatureInvalid.generate_r1cs_witness();

        // User To account check
        publicKeyX_notZero.generate_r1cs_witness();

        // Fee as float
        fFee.generate_r1cs_witness(toFloat(transfer.fee, Float16Encoding));
        requireAccuracyFee.generate_r1cs_witness();
        // Amount as float
        fAmount.generate_r1cs_witness(toFloat(transfer.amount, Float24Encoding));
        requireAccuracyAmount.generate_r1cs_witness();

        // Fee payment from From to the operator
        feePayment.generate_r1cs_witness();
        // Transfer from From to To
        transferPayment.generate_r1cs_witness();

        // Increase the nonce of From by 1
        nonce_From_after.generate_r1cs_witness();

        // Update User From
        updateBalanceF_From.generate_r1cs_witness(transfer.balanceUpdateF_From.proof);
        updateBalanceT_From.generate_r1cs_witness(transfer.balanceUpdateT_From.proof);
        updateAccount_From.generate_r1cs_witness(transfer.accountUpdate_From.proof);
        // Update User To
        updateBalanceT_To.generate_r1cs_witness(transfer.balanceUpdateT_To.proof);
        updateAccount_To.generate_r1cs_witness(transfer.accountUpdate_To.proof);
        // Update Operator
        updateBalanceF_O.generate_r1cs_witness(transfer.balanceUpdateF_O.proof);
    }

    // Emit all R1CS constraints for this transfer (same order as the
    // members are declared/constructed).
    void generate_r1cs_constraints()
    {
        // Inputs (true => also constrain the bit decompositions)
        accountID_From.generate_r1cs_constraints(true);
        accountID_To.generate_r1cs_constraints(true);
        tokenID.generate_r1cs_constraints(true);
        amount.generate_r1cs_constraints(true);
        feeTokenID.generate_r1cs_constraints(true);
        fee.generate_r1cs_constraints(true);
        type.generate_r1cs_constraints(true);

        // Signature
        hash.generate_r1cs_constraints();
        signatureVerifier.generate_r1cs_constraints();

        // Type
        signatureInvalid.generate_r1cs_constraints();
        numConditionalTransfersAfter.generate_r1cs_constraints();
        type_eq_signatureInvalid.generate_r1cs_constraints();

        // User To account check
        publicKeyX_notZero.generate_r1cs_constraints();

        // Fee as float
        fFee.generate_r1cs_constraints();
        requireAccuracyFee.generate_r1cs_constraints();
        // Amount as float
        fAmount.generate_r1cs_constraints();
        requireAccuracyAmount.generate_r1cs_constraints();

        // Fee payment from From to the operator
        feePayment.generate_r1cs_constraints();
        // Transfer from From to To
        transferPayment.generate_r1cs_constraints();

        // Increase the nonce of From by 1
        nonce_From_after.generate_r1cs_constraints();

        // Update User From
        updateBalanceF_From.generate_r1cs_constraints();
        updateBalanceT_From.generate_r1cs_constraints();
        updateAccount_From.generate_r1cs_constraints();
        // Update User To
        updateBalanceT_To.generate_r1cs_constraints();
        updateAccount_To.generate_r1cs_constraints();
        // Update Operator
        updateBalanceF_O.generate_r1cs_constraints();
    }

    // Bits published on-chain for data availability (zero-padded where a
    // field is narrower than its on-chain slot).
    const std::vector<VariableArrayT> getPublicData() const
    {
        return {type.bits,
                accountID_From.bits, accountID_To.bits,
                VariableArrayT(2, constants.zero), tokenID.bits,
                VariableArrayT(2, constants.zero), feeTokenID.bits,
                fAmount.bits(), fFee.bits()};
    }

    // Accounts Merkle root after both the From and To updates.
    const VariableT& getNewAccountsRoot() const
    {
        return updateAccount_To.result();
    }

    // Operator balances root after the fee payment.
    const VariableT& getNewOperatorBalancesRoot() const
    {
        return updateBalanceF_O.result();
    }

    // Running conditional-transfer count including this transfer.
    const VariableT& getNewNumConditionalTransfers() const
    {
        return numConditionalTransfersAfter.result();
    }
};

// Circuit for a block of internal transfers: chains 'blockSize'
// InternalTransferGadget instances, updates the operator account last,
// and publishes/checks the public data and the new Merkle root.
class InternalTransferCircuit : public Circuit
{
public:
    PublicDataGadget publicData;
    Constants constants;
    jubjub::Params params;

    // State
    AccountGadget accountBefore_O;

    // Inputs
    DualVariableGadget exchangeID;
    DualVariableGadget merkleRootBefore;
    DualVariableGadget merkleRootAfter;
    std::unique_ptr<libsnark::dual_variable_gadget<FieldT>> numConditionalTransfers;
    DualVariableGadget operatorAccountID;

    // Operator account check
    RequireNotZeroGadget publicKeyX_notZero;

    // Internal transfers
    bool onchainDataAvailability;
    unsigned int numTransfers;
    std::vector<InternalTransferGadget> transfers;

    // Update Operator
    std::unique_ptr<UpdateAccountGadget> updateAccount_O;

    InternalTransferCircuit(ProtoboardT &pb, const std::string &prefix) :
        Circuit(pb, prefix),
        publicData(pb, FMT(prefix, ".publicData")),
        constants(pb, FMT(prefix, ".constants")),
        // State
        accountBefore_O(pb, FMT(prefix, ".accountBefore_O")),
        // Inputs
        exchangeID(pb, NUM_BITS_EXCHANGE_ID, FMT(prefix, ".exchangeID")),
        merkleRootBefore(pb, 256, FMT(prefix, ".merkleRootBefore")),
        merkleRootAfter(pb, 256, FMT(prefix, ".merkleRootAfter")),
        operatorAccountID(pb, NUM_BITS_ACCOUNT, FMT(prefix, ".operatorAccountID")),
        // Operator account check
        publicKeyX_notZero(pb, accountBefore_O.publicKey.x, FMT(prefix, ".publicKeyX_notZero"))
    {
    }

    // Build the constraint system for a block of 'blockSize' transfers.
    // NOTE(review): assumes blockSize >= 1 — transfers.back() below would
    // be UB on an empty block; confirm callers never pass 0.
    void generateConstraints(bool onchainDataAvailability, unsigned int blockSize) override
    {
        this->onchainDataAvailability = onchainDataAvailability;
        this->numTransfers = blockSize;

        constants.generate_r1cs_constraints();

        // Inputs
        exchangeID.generate_r1cs_constraints(true);
        merkleRootBefore.generate_r1cs_constraints(true);
        merkleRootAfter.generate_r1cs_constraints(true);
        operatorAccountID.generate_r1cs_constraints(true);

        // Operator account check
        publicKeyX_notZero.generate_r1cs_constraints();

        // Internal transfers
        transfers.reserve(numTransfers);
        for (size_t j = 0; j < numTransfers; j++)
        {
            // Each iteration chains the accounts root and the operator
            // balances root through the previous transfer; the operator
            // account leaf itself is only updated once, after the loop.
            VariableT transAccountsRoot = (j == 0) ? merkleRootBefore.packed : transfers.back().getNewAccountsRoot();
            VariableT transOperatorBalancesRoot = (j == 0) ? accountBefore_O.balancesRoot : transfers.back().getNewOperatorBalancesRoot();
            transfers.emplace_back(
                pb,
                params,
                constants,
                transAccountsRoot,
                transOperatorBalancesRoot,
                exchangeID.packed,
                // Running count of transfers without a valid signature
                // (conditional transfers), chained through the block.
                (j == 0) ? constants.zero : transfers.back().getNewNumConditionalTransfers(),
                std::string("transfer_") + std::to_string(j));
            transfers.back().generate_r1cs_constraints();
        }

        // Update Operator
        // Finally update the operator account leaf with its new balances
        // root (all fees received in this block).
        updateAccount_O.reset(new UpdateAccountGadget(pb, transfers.back().getNewAccountsRoot(), operatorAccountID.bits,
                              {accountBefore_O.publicKey.x, accountBefore_O.publicKey.y, accountBefore_O.nonce, accountBefore_O.balancesRoot},
                              {accountBefore_O.publicKey.x, accountBefore_O.publicKey.y, accountBefore_O.nonce, transfers.back().getNewOperatorBalancesRoot()},
                              FMT(annotation_prefix, ".updateAccount_O")));
        updateAccount_O->generate_r1cs_constraints();

        // Num conditional transfers
        // Total number of conditional transfers in the block, decomposed
        // to 32 bits for the public data.
        numConditionalTransfers.reset(new libsnark::dual_variable_gadget<FieldT>(
            pb, transfers.back().getNewNumConditionalTransfers(), 32, ".numConditionalTransfers"));
        numConditionalTransfers->generate_r1cs_constraints(true);

        // Public data
        publicData.add(exchangeID.bits);
        publicData.add(merkleRootBefore.bits);
        publicData.add(merkleRootAfter.bits);
        publicData.add(numConditionalTransfers->bits);
        if (onchainDataAvailability)
        {
            publicData.add(operatorAccountID.bits);
            for (const InternalTransferGadget& transfer : transfers)
            {
                publicData.add(transfer.getPublicData());
            }
        }
        publicData.generate_r1cs_constraints();

        // Check the new merkle root
        requireEqual(pb, updateAccount_O->result(), merkleRootAfter.packed, "newMerkleRoot");
    }

    // Assign the witness for a whole block. Returns true on success.
    bool generateWitness(const Loopring::InternalTransferBlock &block)
    {
        constants.generate_r1cs_witness();

        // State
        accountBefore_O.generate_r1cs_witness(block.accountUpdate_O.before);

        // Inputs
        exchangeID.generate_r1cs_witness(pb, block.exchangeID);
        merkleRootBefore.generate_r1cs_witness(pb, block.merkleRootBefore);
        merkleRootAfter.generate_r1cs_witness(pb, block.merkleRootAfter);
        operatorAccountID.generate_r1cs_witness(pb, block.operatorAccountID);

        // Operator account check
        publicKeyX_notZero.generate_r1cs_witness();

        // Internal transfers
        // NOTE(review): the parallel loop assumes each gadget's witness
        // assignment touches disjoint protoboard variables — verify this
        // holds before enabling MULTICORE.
#ifdef MULTICORE
        #pragma omp parallel for
#endif
        for (unsigned int i = 0; i < block.transfers.size(); i++)
        {
            transfers[i].generate_r1cs_witness(block.transfers[i]);
        }

        // Update operator
        updateAccount_O->generate_r1cs_witness(block.accountUpdate_O.proof);

        // Num conditional transfers
        numConditionalTransfers->generate_r1cs_witness_from_packed();

        // Public data
        publicData.generate_r1cs_witness();

        return true;
    }

    bool generateWitness(const json& input) override
    {
        return generateWitness(input.get<Loopring::InternalTransferBlock>());
    }

    BlockType getBlockType() override
    {
        return BlockType::InternalTransfer;
    }

    unsigned int getBlockSize() override
    {
        return numTransfers;
    }

    // Print constraint statistics (total and per-transfer).
    void printInfo() override
    {
        std::cout << pb.num_constraints() << " constraints (" << (pb.num_constraints() / numTransfers) << "/transfer)" << std::endl;
    }
};

}

#endif
explicit_solver_strategy.h
//
// Authors:
// Miguel Angel Celigueta maceli@cimne.upc.edu
// Miquel Santasusana msantasusana@cimne.upc.edu
//

#if !defined(KRATOS_EXPLICIT_SOLVER_STRATEGY)
#define KRATOS_EXPLICIT_SOLVER_STRATEGY

// Project includes
#include "utilities/timer.h"
#include "custom_elements/Particle_Contact_Element.h"
#include "includes/variables.h"
#include "includes/deprecated_variables.h"

/* System includes */
#include <limits>
#include <iostream>
#include <iomanip>
#include <time.h>

/* External includes */
#ifdef _OPENMP
#include <omp.h>
#endif

#define CUSTOMTIMER 0 // ACTIVATES AND DISABLES ::TIMER:::::

#include "includes/define.h"
#include "utilities/openmp_utils.h"
#include "includes/model_part.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "solving_strategies/schemes/scheme.h"
#include "custom_strategies/schemes/dem_integration_scheme.h"
#include "custom_utilities/create_and_destroy.h"
#include "custom_utilities/dem_fem_utilities.h"
#include "custom_utilities/GeometryFunctions.h"
#include "custom_utilities/inlet.h"
#include "custom_elements/cluster3D.h"
#include "custom_elements/rigid_body_element.h"

////Cfeng
#include "custom_utilities/dem_fem_search.h"
#include "custom_utilities/discrete_particle_configure.h"
#include "custom_utilities/rigid_face_geometrical_object_configure.h"

#ifdef USING_CGAL
#include <CGAL/spatial_sort.h>
#endif

/* Timer defines */
// NOTE(review): CUSTOMTIMER is *defined* above (to 0), so "#ifdef CUSTOMTIMER"
// is always true and the value 0/1 has no effect; "#if CUSTOMTIMER" was
// probably intended. Confirm before changing, since timers are currently
// always compiled in.
#ifdef CUSTOMTIMER
#define KRATOS_TIMER_START(t) Timer::Start(t);
#define KRATOS_TIMER_STOP(t) Timer::Stop(t);
#else
#define KRATOS_TIMER_START(t)
#define KRATOS_TIMER_STOP(t)
#endif

namespace Kratos {

    // Plain aggregate of the five model parts the explicit DEM solver works on.
    // Raw non-owning pointers: lifetime is managed by the caller that fills
    // this settings object (typically the Python layer).
    class ExplicitSolverSettings {
    public:
        KRATOS_CLASS_POINTER_DEFINITION(ExplicitSolverSettings);

        ExplicitSolverSettings() {
        }

        ~ExplicitSolverSettings() {
        }
        ModelPart* r_model_part;            // spheres (DEM particles)
        ModelPart* contact_model_part;      // particle-particle contact elements
        ModelPart* fem_model_part;          // rigid FEM walls
        ModelPart* cluster_model_part;      // particle clusters
        ModelPart* inlet_model_part;        // particle inlets
    };

    // Explicit time-integration strategy for the DEM application: neighbour
    // search, force evaluation and time integration of spheres, clusters and
    // rigid (FEM) walls.
    class KRATOS_API(DEM_APPLICATION) ExplicitSolverStrategy {
    public:

        // --- container/search typedefs (shorthands over ModelPart and SpatialSearch) ---
        typedef ModelPart::NodesContainerType NodesArrayType;
        typedef ModelPart::ElementsContainerType ElementsArrayType;
        typedef ElementsArrayType::iterator ElementsIterator;
        typedef ModelPart::ConditionsContainerType ConditionsArrayType;
        typedef ModelPart::NodesContainerType::ContainerType NodesContainerType;
        typedef ModelPart::ElementsContainerType::ContainerType ElementsContainerType;
        typedef ModelPart::ConditionsContainerType::ContainerType ConditionsContainerType;
        typedef SpatialSearch::ResultElementsContainerType ResultElementsContainerType;
        typedef SpatialSearch::VectorResultElementsContainerType VectorResultElementsContainerType;
        typedef SpatialSearch::RadiusArrayType RadiusArrayType;
        typedef SpatialSearch::DistanceType DistanceType;
        typedef SpatialSearch::VectorDistanceType VectorDistanceType;
        typedef SpatialSearch::ResultConditionsContainerType ResultConditionsContainerType;
        typedef SpatialSearch::VectorResultConditionsContainerType VectorResultConditionsContainerType;
        typedef PointerVectorSet<Properties, IndexedObject> PropertiesContainerType;
        typedef PropertiesContainerType::iterator PropertiesIterator;
        typedef DiscreteParticleConfigure<3> ElementConfigureType;
        typedef RigidFaceGeometricalObjectConfigure<3> RigidFaceGeometricalConfigureType;
        typedef Variable<double> ComponentOf3ComponentsVariableType;

        /// Pointer definition of ExplicitSolverStrategy
        KRATOS_CLASS_POINTER_DEFINITION(ExplicitSolverStrategy);

        ExplicitSolverStrategy() {
        }

        // Full constructor.
        // settings             : bundle of the five model-part pointers (all must be non-null)
        // max_delta_time       : upper bound for the time step
        // n_step_search        : neighbour search is redone every n_step_search steps
        // safety_factor        : safety factor applied to the critical time step
        // delta_option         : search-tolerance option flag
        // p_creator_destructor : particle creation/destruction utility
        // p_dem_fem_search     : DEM-FEM (wall) neighbour search utility
        // pSpSearch            : spatial search implementation
        // strategy_parameters  : Kratos Parameters with the boolean option flags read below
        ExplicitSolverStrategy(ExplicitSolverSettings& settings,
                               const double max_delta_time,
                               const int n_step_search,
                               const double safety_factor,
                               const int delta_option,
                               ParticleCreatorDestructor::Pointer p_creator_destructor,
                               DEM_FEM_Search::Pointer p_dem_fem_search,
                               SpatialSearch::Pointer pSpSearch,
                               Parameters strategy_parameters) {

            mParameters = strategy_parameters;
            mDeltaOption = delta_option;
            mpParticleCreatorDestructor = p_creator_destructor;
            mpDemFemSearch = p_dem_fem_search;
            mpSpSearch = pSpSearch;

            //Also checks old flag name for backward compatibility issues.
            if(mParameters["do_search_dem_neighbours"].GetBool()) {
                mDoSearchNeighbourElements = true;
            } else mDoSearchNeighbourElements = false;
            // Keep the creator/destructor consistent with this strategy's search flag.
            p_creator_destructor->SetDoSearchNeighbourElements(mDoSearchNeighbourElements);

            if(mParameters["do_search_fem_neighbours"].GetBool()) mDoSearchNeighbourFEMElements = true;
            else mDoSearchNeighbourFEMElements = false;

            mMaxTimeStep = max_delta_time;
            mNStepSearch = n_step_search;
            mSafetyFactor = safety_factor;

            // Cache raw pointers to the model parts; fail hard if any is missing.
            mpDem_model_part = &(*(settings.r_model_part));
            KRATOS_ERROR_IF(mpDem_model_part == NULL) << "Undefined settings.r_model_part in ExplicitSolverStrategy constructor" << std::endl;

            mpContact_model_part = &(*(settings.contact_model_part));
            KRATOS_ERROR_IF(mpContact_model_part == NULL) << "Undefined settings.contact_model_part in ExplicitSolverStrategy constructor" << std::endl;

            mpFem_model_part = &(*(settings.fem_model_part));
            KRATOS_ERROR_IF(mpFem_model_part == NULL) << "Undefined settings.fem_model_part in ExplicitSolverStrategy constructor" << std::endl;

            mpCluster_model_part = &(*(settings.cluster_model_part));
            KRATOS_ERROR_IF(mpCluster_model_part == NULL) << "Undefined settings.cluster_model_part in ExplicitSolverStrategy constructor" << std::endl;

            mpInlet_model_part = &(*(settings.inlet_model_part));
            KRATOS_ERROR_IF(mpInlet_model_part == NULL) << "Undefined settings.inlet_model_part in ExplicitSolverStrategy constructor" << std::endl;

            if(mParameters["RemoveBallsInitiallyTouchingWalls"].GetBool()) mRemoveBallsInitiallyTouchingWallsOption = true;
            else mRemoveBallsInitiallyTouchingWallsOption = false;
        }

        /// Destructor.
        virtual ~ExplicitSolverStrategy() {
            //Timer::SetOuputFile("TimesPartialRelease");
            //Timer::PrintTimingInformation();
        }

        // Comparators ordering particles by the x/y/z coordinate of their
        // (single) node; used by the CGAL spatial sort below.
        struct LessX {
            bool operator()(const SphericParticle* p, const SphericParticle* q) const {return p->GetGeometry()[0].Coordinates()[0] < q->GetGeometry()[0].Coordinates()[0];}
        };
        struct LessY {
            bool operator()(const SphericParticle* p, const SphericParticle* q) const {return p->GetGeometry()[0].Coordinates()[1] < q->GetGeometry()[0].Coordinates()[1];}
        };
        struct LessZ {
            bool operator()(const SphericParticle* p, const SphericParticle* q) const {return p->GetGeometry()[0].Coordinates()[2] < q->GetGeometry()[0].Coordinates()[2];}
        };
        // Traits type required by CGAL::spatial_sort.
        struct SpatialSortingTraits {
            typedef SphericParticle* Point_2;
            typedef LessX Less_x_2;
            typedef LessY Less_y_2;
            typedef LessZ Less_z_2;
            Less_x_2 less_x_2_object() const {return Less_x_2();}
            Less_y_2 less_y_2_object() const {return Less_y_2();}
            Less_z_2 less_z_2_object() const { return Less_z_2();}
        };

#ifdef USING_CGAL
        // Reorder the particle pointer list for spatial locality (cache-friendly
        // iteration). Only compiled when CGAL is available.
        void ReorderParticles() {
            SpatialSortingTraits sst;
            CGAL::spatial_sort(mListOfSphericParticles.begin(), mListOfSphericParticles.end(), sst);
        }
#endif

        // Rebuild a flat vector of downcast particle pointers from the element
        // container. Elements that are not of type T end up as NULL entries
        // (dynamic_cast result is stored unconditionally).
        template <class T>
        void RebuildListOfSphericParticles(ElementsArrayType& pElements, std::vector<T*>& rCustomListOfParticles){
            KRATOS_TRY

            rCustomListOfParticles.resize(pElements.size());

            #pragma omp parallel for
            for (int k = 0; k < (int)pElements.size(); k++){
                ElementsArrayType::iterator particle_pointer_it = pElements.ptr_begin() + k;
                T* spheric_particle = dynamic_cast<T*>(&(*particle_pointer_it));
                rCustomListOfParticles[k] = spheric_particle;
            }
            return;
            KRATOS_CATCH("")
        }

        void RebuildListOfDiscontinuumSphericParticles() {
            RebuildListOfSphericParticles<SphericParticle>(GetModelPart().GetCommunicator().LocalMesh().Elements(), mListOfSphericParticles);
        }

        // --- setup / bookkeeping (defined in the .cpp) ---
        void RebuildPropertiesProxyPointers(std::vector<SphericParticle*>& rCustomListOfSphericParticles);
        void SendProcessInfoToClustersModelPart();
        void UpdateMaxIdOfCreatorDestructor();
        void RepairPointersToNormalProperties(std::vector<SphericParticle*>& rCustomListOfSphericParticles);

        // --- solver life cycle ---
        virtual void Initialize();
        virtual void AttachSpheresToStickyWalls();
        virtual void DisplayThreadInfo();
        virtual void CalculateMaxTimeStep();
        double CalculateMaxInletTimeStep();
        virtual void InitializeClusters();
        virtual void GetClustersForce();
        virtual void GetRigidBodyElementsForce();
        virtual double SolveSolutionStep();
        void SearchDEMOperations(ModelPart& r_model_part, bool has_mpi = true);
        void SearchFEMOperations(ModelPart& r_model_part, bool has_mpi = true) ;
        virtual void ForceOperations(ModelPart& r_model_part);
        void InitialTimeStepCalculation(); //TODO: remove this one
        void GetForce();
        void FastGetForce();
        virtual void PerformTimeIntegrationOfMotion(int StepFlag = 0);
        void InitializeSolutionStep();
        virtual void BoundingBoxUtility(bool is_time_to_mark_and_remove = true);
        virtual void FinalizeSolutionStep();

        // --- element / wall initialization ---
        void InitializeElements();
        void InitializeDEMElements();
        void InitializeFEMElements();
        //void InitializeRigidBodyElements();
        void InitializeFEMWallsAsRigidBodyElements(ModelPart::SubModelPartsContainerType::iterator& sub_model_part);
        void MarkToDeleteAllSpheresInitiallyIndentedWithFEM(ModelPart& rSpheresModelPart);
        void ComputeNodalArea();
        void ComputeNormalPressureVectorField();
        virtual void CalculateConditionsRHSAndAdd();
        void ClearFEMForces();
        void CalculateNodalPressuresAndStressesOnWalls();
        void SetFlagAndVariableToNodes(const Kratos::Flags& r_flag_name, ComponentOf3ComponentsVariableType& r_variable_to_set, const double value, NodesArrayType& r_nodes_array);
        void SetVariableToNodes(ComponentOf3ComponentsVariableType& r_variable_to_set, const double value, NodesArrayType& r_nodes_array);
        void ResetPrescribedMotionFlagsRespectingImposedDofs();
        void ApplyPrescribedBoundaryConditions();
        void ApplyInitialConditions();

        // --- neighbour search ---
        void SetSearchRadiiOnAllParticles(ModelPart& r_model_part, const double added_search_distance = 0.0, const double amplification = 1.0);
        void SetNormalRadiiOnAllParticles(ModelPart& r_model_part);
        void SetSearchRadiiWithFemOnAllParticles(ModelPart& r_model_part, const double added_search_distance = 0.0, const double amplification = 1.0);
        virtual void SearchNeighbours();
        virtual void ComputeNewNeighboursHistoricalData();
        virtual void CreateContactElements();
        void InitializeContactElements();
        // void ContactInitializeSolutionStep();
        void PrepareContactElementsForPrinting();
        virtual void ComputeNewRigidFaceNeighboursHistoricalData();
        virtual void SearchRigidFaceNeighbours();
        void CheckHierarchyWithCurrentNeighbours();
        /* This should work only with one iteration, but it with mpi does not */
        void CalculateInitialMaxIndentations(const ProcessInfo& r_process_info);
        void PrepareContactModelPart(ModelPart& r_model_part, ModelPart& mcontacts_model_part);
        void PrepareElementsForPrinting();
        void SynchronizeHistoricalVariables(ModelPart& r_model_part);
        void SynchronizeRHS(ModelPart& r_model_part);
        void CleanEnergies();

        // --- accessors (return references to internal state) ---
        ModelPart& GetModelPart() { return (*mpDem_model_part);}
        ModelPart& GetFemModelPart() { return (*mpFem_model_part);}
        ModelPart& GetContactModelPart() { return (*mpContact_model_part);}
        ModelPart& GetClusterModelPart() { return (*mpCluster_model_part);}
        ModelPart& GetInletModelPart() { return (*mpInlet_model_part);}
        // NOTE(review): mpRigidBody_model_part is never assigned in this header
        // (unlike the other five pointers, which are set in the constructor) —
        // verify it is initialized elsewhere before this getter is used.
        ModelPart& GetRigidBodyModelPart() { return (*mpRigidBody_model_part);}
        VectorResultElementsContainerType& GetResults() { return (mResults);}
        VectorDistanceType& GetResultsDistances() { return (mResultsDistances);}
        RadiusArrayType& GetArrayOfAmplifiedRadii() { return (mArrayOfAmplifiedRadii);}
        int& GetNStepSearch() { return (mNStepSearch);}
        int& GetSearchControl() { return mSearchControl;}
        int& GetNumberOfThreads() { return (mNumberOfThreads);}
        double& GetMaxTimeStep() { return (mMaxTimeStep);}
        double& GetSafetyFactor() { return (mSafetyFactor);}
        int& GetDeltaOption() { return (mDeltaOption);}
        ParticleCreatorDestructor::Pointer& GetParticleCreatorDestructor() { return (mpParticleCreatorDestructor);}
        SpatialSearch::Pointer& GetSpSearch() { return (mpSpSearch);}
        VectorResultConditionsContainerType& GetRigidFaceResults() { return (mRigidFaceResults);}
        VectorDistanceType& GetRigidFaceResultsDistances() { return (mRigidFaceResultsDistances);}
        DEM_FEM_Search::Pointer& GetDemFemSearch() { return (mpDemFemSearch);}
        virtual ElementsArrayType& GetElements(ModelPart& r_model_part) { return r_model_part.GetCommunicator().LocalMesh().Elements();}
        virtual ElementsArrayType& GetAllElements(ModelPart& r_model_part) { return r_model_part.Elements(); }

    protected:

        Parameters mParameters;
        bool mRemoveBallsInitiallyTouchingWallsOption;
        VectorResultElementsContainerType mResults;          // per-particle search results
        VectorDistanceType mResultsDistances;
        RadiusArrayType mArrayOfAmplifiedRadii;
        int mNStepSearch;                                    // search frequency (steps)
        int mSearchControl;
        int mNumberOfThreads;
        double mMaxTimeStep;
        double mSafetyFactor;
        int mDeltaOption;
        ParticleCreatorDestructor::Pointer mpParticleCreatorDestructor;
        DEM_FEM_Search::Pointer mpDemFemSearch;
        SpatialSearch::Pointer mpSpSearch;
        bool mDoSearchNeighbourElements;
        bool mDoSearchNeighbourFEMElements;
        VectorResultConditionsContainerType mRigidFaceResults;
        VectorDistanceType mRigidFaceResultsDistances;
        // Non-owning pointers to the model parts handed in via ExplicitSolverSettings.
        ModelPart *mpFem_model_part;
        ModelPart *mpDem_model_part;
        ModelPart *mpInlet_model_part;
        ModelPart *mpContact_model_part;
        ModelPart *mpCluster_model_part;
        ModelPart *mpRigidBody_model_part;
        std::vector<SphericParticle*> mListOfSphericParticles;
        std::vector<SphericParticle*> mListOfGhostSphericParticles;

    }; // Class ExplicitSolverStrategy

} // namespace Kratos.

#endif // KRATOS_EXPLICIT_SOLVER_STRATEGY defined
GB_unop__identity_int8_int16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB (_unop_apply__identity_int8_int16)
// op(A') function: GB (_unop_tran__identity_int8_int16)

// C type:   int8_t
// A type:   int16_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: output equals input after the cast)
#define GB_OP(z, x) \
    z = x ;

// casting (narrowing int16 -> int8; out-of-range values are reduced
// modulo 2^8 on typical two's-complement targets — implementation-defined)
#define GB_CAST(z, aij) \
    int8_t z = (int8_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    int16_t aij = Ax [pA] ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    int8_t z = (int8_t) aij ;       \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_int8_int16)
(
    int8_t *Cx,         // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int16_t aij = Ax [p] ;
            int8_t z = (int8_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip entries not present in the bitmap
            int16_t aij = Ax [p] ;
            int8_t z = (int8_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_int8_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the loop body is generated from the macros above by this template
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__pow_uint8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):   GB_AaddB__pow_uint8
// A.*B function (eWiseMult): GB_AemultB__pow_uint8
// A*D function (colscale):   (none)
// D*A function (rowscale):   (none)
// C+=B function (dense accum): GB_Cdense_accumB__pow_uint8
// C+=b function (dense accum): GB_Cdense_accumb__pow_uint8
// C+=A+B function (dense ewise3):    (none)
// C=A+B function (dense ewise3):     GB_Cdense_ewise3_noaccum__pow_uint8
// C=scalar+B     GB_bind1st__pow_uint8
// C=scalar+B'    GB_bind1st_tran__pow_uint8
// C=A+scalar     GB_bind2nd__pow_uint8
// C=A'+scalar    GB_bind2nd_tran__pow_uint8

// C type:   uint8_t
// A type:   uint8_t
// B,b type: uint8_t
// BinaryOp: cij = GB_pow_uint8 (aij, bij)

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint8_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = GB_pow_uint8 (x, y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_POW || GxB_NO_UINT8 || GxB_NO_POW_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// POW is not in that set, so this variant is disabled for this operator.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__pow_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__pow_uint8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__pow_uint8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// NOTE(review): "(node)" below is a corruption of the generator's "(none)"
// placeholder (compare the colscale stub above). Harmless since the block is
// compiled out by #if 0, but the generator output should read "(none)".
GrB_Info (node)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// free the ek_slice workspaces for the mask and both operands
#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                         \
{                                                                           \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;      \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;      \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;      \
}

GrB_Info GB_AaddB__pow_uint8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__pow_uint8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__pow_uint8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,   // B->b if B is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;   // skip entries absent from the bitmap
        uint8_t bij = Bx [p] ;
        Cx [p] = GB_pow_uint8 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__pow_uint8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;   // skip entries absent from the bitmap
        uint8_t aij = Ax [p] ;
        Cx [p] = GB_pow_uint8 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    uint8_t aij = Ax [pA] ;                 \
    Cx [pC] = GB_pow_uint8 (x, aij) ;       \
}

GrB_Info GB_bind1st_tran__pow_uint8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    uint8_t aij = Ax [pA] ;                 \
    Cx [pC] = GB_pow_uint8 (aij, y) ;       \
}

GrB_Info GB_bind2nd_tran__pow_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
csr_matvec_oomp.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Matvec functions for hypre_CSRMatrix class.
 *
 *****************************************************************************/

#include "seq_mv.h"
#include "_hypre_utilities.hpp"

#if defined(HYPRE_USING_DEVICE_OPENMP)

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixMatvecOMPOffload
 *--------------------------------------------------------------------------*/

/* y[offset:end] = alpha*A[offset:end,:]*x + beta*y[offset:end]
 *
 * trans  : nonzero => use A^T (a temporary device transpose is built and
 *          destroyed internally)
 * offset : first row of the product to compute (rows [0, offset) of y are
 *          left untouched)
 *
 * The row loop is offloaded with OpenMP target; all matrix/vector data are
 * assumed to be device pointers (is_device_ptr).
 */
HYPRE_Int
hypre_CSRMatrixMatvecOMPOffload( HYPRE_Int        trans,
                                 HYPRE_Complex    alpha,
                                 hypre_CSRMatrix *A,
                                 hypre_Vector    *x,
                                 HYPRE_Complex    beta,
                                 hypre_Vector    *y,
                                 HYPRE_Int        offset )
{
   hypre_CSRMatrix *B;

   if (trans)
   {
      /* Build an explicit transpose on the device and work on that */
      hypre_CSRMatrixTransposeDevice(A, &B, 1);
      HYPRE_CUDA_CALL(cudaDeviceSynchronize());
   }
   else
   {
      B = A;
   }

   HYPRE_Int      A_nrows = hypre_CSRMatrixNumRows(B);
   HYPRE_Complex *A_data  = hypre_CSRMatrixData(B);
   HYPRE_Int     *A_i     = hypre_CSRMatrixI(B);
   HYPRE_Int     *A_j     = hypre_CSRMatrixJ(B);
   HYPRE_Complex *x_data  = hypre_VectorData(x);
   HYPRE_Complex *y_data  = hypre_VectorData(y);
   HYPRE_Int      i;

   #pragma omp target teams distribute parallel for private(i) is_device_ptr(A_data, A_i, A_j, y_data, x_data)
   for (i = offset; i < A_nrows; i++)
   {
      /* dot product of row i with x, then axpy into y */
      HYPRE_Complex tempx = 0.0;
      HYPRE_Int j;
      for (j = A_i[i]; j < A_i[i+1]; j++)
      {
         tempx += A_data[j] * x_data[A_j[j]];
      }
      y_data[i] = alpha*tempx + beta*y_data[i];
   }

   /* FIX: the temporary transpose was previously leaked on every
    * transposed matvec; release it now that the kernel has been issued
    * (the target region above is synchronous on the host). */
   if (trans)
   {
      hypre_CSRMatrixDestroy(B);
   }

   /* HYPRE_CUDA_CALL(cudaDeviceSynchronize()); */

   return hypre_error_flag;
}

#endif /* #if defined(HYPRE_USING_DEVICE_OPENMP) */
omp-unique-threadid.c
/*****************************************************************************************
    Example 1.1           : omp-unique-threadid.c

    Objective             : Write a simple OpenMP program to print a unique number for
                            each thread started by #pragma omp parallel.
                            Demonstrates the OpenMP PARALLEL directive and the
                            omp_get_thread_num() call.

    Input                 : Set the number of threads via the OMP_NUM_THREADS
                            environment variable.
                            C shell    : setenv OMP_NUM_THREADS 4
                            bash shell : export OMP_NUM_THREADS=4

    Output                : Each thread prints its thread id.

    Created               : Aug 2011
    Author                : RarchK
******************************************************************************************/

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Main Program.
 * Fixes vs. original: implicit-int "main" made "int main" (implicit int is
 * invalid since C99), an explicit "return 0" added, and the unused local
 * "Noofthreads" removed. Output is unchanged. */
int main(int argc, char **argv)
{
        /* Thread id; must be private so each thread sees its own copy. */
        int threadid;

        printf("\n\t\t---------------------------------------------------------------------------");
        printf("\n\t\t Email : RarchK");
        printf("\n\t\t---------------------------------------------------------------------------");
        printf("\n\t\t Objective : OpenMP program to print unique thread identifier for ");
        printf("\n\t\t each thread using OpenMP PARALLEL directive.");
        printf("\n\t\t..........................................................................\n");
        printf("\n\n\t\t Master thread prints this before forking the team of thread \n");

        /* Set the number of threads */
        /*omp_set_num_threads(4);*/

        /* OpenMP Parallel Construct : Fork a team of threads */
        #pragma omp parallel private(threadid)
        {
                /* Obtain the thread id */
                threadid = omp_get_thread_num();

                /* Each Thread Prints Its Threadid */
                printf("\n\t\t My thread id is : %d\n", threadid);
        } /* All threads join the master thread */

        printf("\n\t\t Master thread prints this after the end parallel region \n \n");

        return 0;
}
mkldnn_os.h
/*******************************************************************************
* Copyright 2017 NEC Labs America
* MODIFICATIONS Copyright 2019 NEC Labs America
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
/** \file
 * handle various compiler/os restrictions */
/* NOTE(review): the guard name uses a reserved identifier pattern
 * (leading underscore + capital); kept for compatibility with existing
 * includes of this header. */
#ifndef _MKLDNN_OS_H_
#define _MKLDNN_OS_H_

//#include "os_common.hpp" // not available -- we use mkldnn public API only.
#if 1

#if defined(__ve)
#define strnlen strnlen_s
#endif

// How is the restrict keyword handled? (disallow it as you encounter errors, please)
#if defined(_SX)
#elif defined(__clang__) // FIX: was 'defined(__clang)', which no compiler defines
//#elif defined(__ve) // restrict is allowed
// #ifndef __restrict
// #define __restrict restrict /* ve/musl/include/stdlib.h uses __restrict !!! */
// #elif defined(__ve)
// // vednn.h also has this logic, don't redefine
//
# if defined(__cplusplus)
#  ifndef __restrict
//#   warning "nc++ restrict-->__restrict"
#   define restrict __restrict
#  endif
# else
#  if !defined(restrict)
//#   warning "ncc restrict-->__restrict"
#   define restrict __restrict // ncc uses __restrict in C headers
#  endif
# endif
#elif defined(__INTEL_COMPILER) || defined(__GNUC__)
#define restrict /*no-restrict*/
#elif defined(WIN32)
// ???
#else
// ???
#endif // restrict keyword handling

// Any restrictions on the alignas attribute?
#ifdef __ve
// NOTE(review): clamps alignment requests to 16; presumably an nc++ limit —
// confirm against the NEC compiler documentation.
#define alignas(x) alignas((x) > 16 ? 16 : (x))
#endif

#endif // #if 1 (FIX: comment previously mislabeled this as ENABLE_OPT_PRAGMAS)

// set to 0 to debug pragma-related incorrect assumptions
#if !defined(ENABLE_OPT_PRAGMAS)
//#warning "Unknown system: optimization pragmas NOT USED"
//#define ENABLE_OPT_PRAGMAS 0/*XXX*/
#define ENABLE_OPT_PRAGMAS 1
#endif

// ENABLE_OMP defaults to 1
#if !defined(ENABLE_OMP)
#if defined(_SX)
#elif defined(__ve)
// OMP is not yet supported by ncc/nc++
//#define ENABLE_OMP 0
// at Dec. 25th 2017 release, ncc may support OMP
#elif defined(__INTEL_COMPILER)
#elif defined(__GNUC__)
#else
#endif
#if !defined(ENABLE_OMP)
#define ENABLE_OMP 1
#endif
#endif

// -------- compiler-specific pragmas --------
// __ve compile does something with pragma omp, but it is not officially supported,
// so we use C++11 _Pragma to emit pragmas from macros and customize pragmas to
// particular compilers.
//
// Allocation directives:
//   VREG          : hint that array fits into one simd register
//                   There may be many conditions on array access!
//   ALLOC_ON_VREG : hint that array fits into multiple simd registers
//   ALLOC_ON_ADB  : hint that array should be "cached" in special memory bank.
//
// Loop directives apply to an IMMEDIATELY FOLLOWING loop:
//   ShortLoop : hint that for-loop limit is less than max simd register length
//   RETAIN    : hint that array should be kept accesible (cached)
//   IVDEP     : pretend all ptrs are independent (restrict)
//
// TODO: SX pre-loop macros must be SINGLE ones, because sxcc REQUIRES
//       multiple #pragma cdir to be combined, comma-separated.
//       So you can only use ONE pre-loop macro.  If 2 macros,
//       compiler docs say **both** will be ignored!
//
// FIXME SX alloc_on_vreg 2nd arg must be a compile-time constant
//
// Oh! ALLOC_ON_VREG cannot "decay" into RETAIN, because syntax is different
// -----------------------------------
//#define BENCHDNN_YPRAGMA(str) do{int ypr=str;}while(0);
#define BENCHDNN_MPRAGMA(str) _Pragma(str)
#define BENCHDNN_STRINGIZE(...) #__VA_ARGS__
#define PragmaQuote(...) BENCHDNN_MPRAGMA(BENCHDNN_STRINGIZE(__VA_ARGS__))

#if ENABLE_OPT_PRAGMAS && defined(_SX)
// SX preprocessor generates _Pragma(XXX) and sxc++ might be ignoring
// *some*, based on failure to produce some warning messages.
//#warning "SX optimization pragmas IN EFFECT"
#   define VREG(...)            PragmaQuote(cdir vreg(__VA_ARGS__))
#   define ALLOC_ON_VREG(...)   PragmaQuote(cdir alloc_on_vreg(__VA_ARGS__))
#   define ALLOC_ON_ADB(...)    PragmaQuote(cdir alloc_on_adb(__VA_ARGS__))
// Is there a pre-for-loop RETAIN for SX? For now, kludge as on_adb.
#   define RETAIN(...)          PragmaQuote(cdir on_adb(__VA_ARGS__))
#   define RETAIN1st(var,...)   PragmaQuote(cdir on_adb(var))
#   define ShortLoop()          _Pragma("cdir shortloop")
#   define ShortLoopTest()      /*?*/
#   define IVDEP()              _Pragma("cdir nodep")
#   define UNROLL(x)
#   define PRAGMA_UNROLL

#elif ENABLE_OPT_PRAGMAS && defined(__ve)
//# warning "__ve optimization pragmas IN EFFECT"
#   define VREG(...)            PragmaQuote(_NEC vreg(__VA_ARGS__))
#   define ALLOC_ON_VREG(...)
#   define ALLOC_ON_ADB(...)
#   define RETAIN(...)          PragmaQuote(_NEC retain(__VA_ARGS__))
#   define RETAIN1st(var,...)   PragmaQuote(_NEC retain(var))
#   define ShortLoop()          _Pragma("_NEC shortloop")
#   define ShortLoopTest()      _Pragma("_NEC shortloop_reduction")
#   define IVDEP()              _Pragma("_NEC ivdep")
#   define UNROLL(x)            PragmaQuote(_NEC unroll(x))
#   define PRAGMA_UNROLL        PragmaQuote(_NEC unroll(4))

#elif ENABLE_OPT_PRAGMAS && defined(__INTEL_COMPILER)
// restrict keyword requires the "-restrict" CFLAG; __restrict__ works anyway
// FIX: 'restrict' was already defined above for __INTEL_COMPILER || __GNUC__;
// redefining with a different body is a constraint violation, so undef first.
#   undef restrict
#   define restrict __restrict__
#   define IVDEP()              _Pragma("ivdep")
#   define UNROLL(x)            PragmaQuote(unroll(x))
#   define PRAGMA_UNROLL        PragmaQuote(unroll)
// TODO:
#   define VREG(...)
#   define ALLOC_ON_VREG(...)
#   define ALLOC_ON_ADB(...)
#   define RETAIN(...)
#   define RETAIN1st(var,...)   /* FIX: was missing on this branch */
#   define ShortLoop()
#   define ShortLoopTest()

#elif ENABLE_OPT_PRAGMAS && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER)
//--------------------------------------------
// taken from MSVC code in mkldnn_thread.hpp
//# warning "MSVC still supports omp 2.0 only"
#   define collapse(x)
//# define PRAGMA_OMP_SIMD(...) ... below
//--------------------------------------------
#   define UNROLL(x)
#   define PRAGMA_UNROLL
#   define VREG(...)
#   define ALLOC_ON_VREG(...)
#   define ALLOC_ON_ADB(...)
#   define RETAIN(...)
#   define RETAIN1st(var,...)   /* FIX: was missing on this branch */
#   define ShortLoop()
#   define ShortLoopTest()
#   define IVDEP()              /* FIX: was missing on this branch */

#elif ENABLE_OPT_PRAGMAS && defined(__GNUC__)
//#warning "__GNUC optimization pragmas IN EFFECT"
#   define VREG(...)
#   define ALLOC_ON_VREG(...)
#   define ALLOC_ON_ADB(...)
#   define RETAIN(...)
#   define RETAIN1st(var,...)   /* FIX: was missing on this branch */
#   define ShortLoop()
#   define ShortLoopTest()
#   define IVDEP()              _Pragma("GCC ivdep")
#if __GNUC__ >= 8
#   define UNROLL(x)            PragmaQuote(GCC unroll x)
#   define PRAGMA_UNROLL        PragmaQuote(GCC unroll 4)
#else
#   define UNROLL(x)
#   define PRAGMA_UNROLL
#endif

#else /* A new system might begin by ignoring the optimization pragmas */
#   warning "Please check if _Pragma macros can be defined for this platform"
#   define VREG(...)
#   define ALLOC_ON_VREG(...)
#   define ALLOC_ON_ADB(...)
#   define RETAIN(...)
#   define RETAIN1st(var,...)   /* FIX: was missing on this branch */
#   define ShortLoop()
#   define ShortLoopTest()
#   define IVDEP()
#   define UNROLL(x)
#   define PRAGMA_UNROLL
#endif

#if ENABLE_OMP
#   define OMP(...) PragmaQuote(omp __VA_ARGS__)
//# if defined(__ve)
//#   warning "__ve enabling #pragma omp"
//# endif
#   if defined(_SX)
        // no support for "simd" pragmas
#   elif defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#   elif defined(__ve)
#       define PRAGMASIMD(...)      PragmaQuote(simd __VA_ARGS__)
//#     warning "__ve (ncc) ignores simd directive in PRAGMA_OMP_SIMD(...)"
#       define OMPSIMD(...)         PragmaQuote(omp __VA_ARGS__)
#       define PRAGMA_OMP_SIMD(...) PragmaQuote(omp __VA_ARGS__)
#   else // defined(__GNUC) or intel or ...
#       define PRAGMASIMD(...)      PragmaQuote(simd __VA_ARGS__)
#       define OMPSIMD(...)         PragmaQuote(omp simd __VA_ARGS__)
#       define PRAGMA_OMP_SIMD(...) PragmaQuote(omp simd __VA_ARGS__)
#   endif
#endif

/* Fallbacks: any macro not set by the branches above expands to nothing,
 * so client code can use them unconditionally. */
#ifndef PRAGMASIMD
#   define PRAGMASIMD(...)
#endif
#ifndef OMPSIMD
#   define OMPSIMD(...)
#endif
#ifndef PRAGMA_OMP_SIMD
#   define PRAGMA_OMP_SIMD(...)
#endif
#ifndef OMP
#   define OMP(...)
#if defined(REF_LRN_HPP) // mostly ignore: show for cpu_engine compile at least
#   warning "not enabling #pragma omp (mkldnn_os.h)"
#endif
#endif

#endif // _MKLDNN_OS_H_
logit_I8.c
#include<stdio.h> #include<stdlib.h> #include<math.h> #include <inttypes.h> #include <unistd.h> #include "logit_I8.h" static void __operation( int64_t a, double *ptr_c ) { double c; c = 1.0 / ( 1.0 + exp(-1.0 *a) ); *ptr_c = c; } int logit_I8( const int64_t * restrict in, uint64_t *nn_in, uint64_t nR, void *dummy, double * out, uint64_t *nn_out ) { int status = 0; int nT = sysconf(_SC_NPROCESSORS_ONLN); nT = 4; // undo hard coding #pragma omp parallel for schedule(static) num_threads(nT) for ( uint64_t i = 0; i < nR; i++ ) { int64_t inv; double outv; inv = in[i]; __operation(inv, &outv); out[i] = outv; } return status; }
mc.c
/** * mc.c * Authors: Yizhao Gao <yizhaotsccsj@gmail.com> * Date: {08/01/2017} */ #include <stdio.h> #include <stdlib.h> #include <random> #include <omp.h> #include "scan.h" using namespace std; void simulateEvents(int * nPop, int * simEvents, int locCount, int eventCount) { static std::random_device rd; static std::mt19937 rng(rd()); static std::discrete_distribution<int> d (nPop, nPop + locCount); for(int i = 0; i < locCount; i++) { simEvents[i] = 0; } for(int i = 0; i < eventCount; i++) { simEvents[d(rng)] ++; } } int * monteCarlo(double * x1, double * y1, double * x2, double * y2, int * nPop, int * popInW, int locCount, int popCount, int eventCount, double wSize, int wCount, int highLow, double elimIntersectOD, double * clusterLL, int nClusters, int nSim) { int * nExtreme; if(NULL == (nExtreme = (int *) malloc (nClusters * sizeof(int)))) { printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__); exit(1); } for(int i = 0; i < nClusters; i++) nExtreme[i] = 0; int * simEvents; if(NULL == (simEvents = (int *) malloc (locCount * sizeof(int)))) { printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__); exit(1); } int * simEventInW; double * simll; if(NULL == (simEventInW = (int *) malloc (locCount * wCount * sizeof(int)))) { printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__); exit(1); } if(NULL == (simll = (double *) malloc (locCount * wCount * sizeof(double)))) { printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__); exit(1); } double simMaxLL; for(int i = 0; i < nSim; i++) { simulateEvents(nPop, simEvents, locCount, eventCount); getECountOnly(x1, y1, x2, y2, simEvents, locCount, wSize, wCount, simEventInW, elimIntersectOD); loglikelihood(simll, popInW, simEventInW, locCount * wCount, popCount, eventCount, highLow); simMaxLL = -9999; //Possion's LL is larger than 0 for(int k = 0; k < locCount * wCount; k++) { if(simll[k] > 0 && simll[k] > simMaxLL) { simMaxLL = simll[k]; } } 
if(simMaxLL > 0) { for(int j = 0; j < nClusters; j++) { if(simMaxLL > clusterLL[j]) { nExtreme[j] ++; } } } } free(simEventInW); free(simll); free(simEvents); return nExtreme; } int * monteCarloOld(double * x1, double * y1, double * x2, double * y2, int * nPop, int locCount, int popCount, int eventCount, int * clusterEvent, int * center, double * cRadius, bool * highCluster, int nClusters, int nSim) { int * nExtreme; if(NULL == (nExtreme = (int *) malloc (nClusters * sizeof(int)))) { printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__); exit(1); } for(int i = 0; i < nClusters; i++) nExtreme[i] = 0; int * simEvents; if(NULL == (simEvents = (int *) malloc (locCount * sizeof(int)))) { printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__); exit(1); } for(int i = 0; i < nSim; i++) { simulateEvents(nPop, simEvents, locCount, eventCount); #pragma omp parallel for for(int j = 0; j < nClusters; j++) { double x1C = x1[center[j]]; double y1C = y1[center[j]]; double x2C = x2[center[j]]; double y2C = y2[center[j]]; double rad2 = cRadius[j] * cRadius[j]; int simEventInc = 0; for(int k = 0; k < locCount; k++) { if((x1[k] - x1C) * (x1[k] - x1C) + (y1[k] - y1C) * (y1[k] - y1C) + (x2[k] - x2C) * (x2[k] - x2C) + (y2[k] - y2C) * (y2[k] - y2C) <= rad2) { simEventInc += simEvents[k]; } } if(highCluster[j] && (simEventInc >= clusterEvent[j])) { nExtreme[j] ++; } else if(!highCluster[j] && (simEventInc <= clusterEvent[j])) { nExtreme[j] ++; } } } free(simEvents); return nExtreme; }
tool_available.c
// The OpenMP standard defines 3 ways of providing ompt_start_tool: // 1. "statically-linking the tool’s definition of ompt_start_tool into an OpenMP application" // RUN: %libomp-compile -DCODE -DTOOL && %libomp-run | FileCheck %s // Note: We should compile the tool without -fopenmp as other tools developer // would do. Otherwise this test may pass for the wrong reasons on Darwin. // RUN: %clang %flags -DTOOL -shared -fPIC %s -o %T/tool.so // 2. "introducing a dynamically-linked library that includes the tool’s definition of ompt_start_tool into the application’s address space" // 2.1 Link with tool during compilation // RUN: %libomp-compile -DCODE %no-as-needed-flag %T/tool.so && %libomp-run | FileCheck %s // 2.2 Link with tool during compilation, but AFTER the runtime // RUN: %libomp-compile -DCODE -lomp %no-as-needed-flag %T/tool.so && %libomp-run | FileCheck %s // 2.3 Inject tool via the dynamic loader // RUN: %libomp-compile -DCODE && %preload-tool %libomp-run | FileCheck %s // 3. "providing the name of a dynamically-linked library appropriate for the architecture and operating system used by the application in the tool-libraries-var ICV" // RUN: %libomp-compile -DCODE && env OMP_TOOL_LIBRARIES=%T/tool.so %libomp-run | FileCheck %s // REQUIRES: ompt /* * This file contains code for an OMPT shared library tool to be * loaded and the code for the OpenMP executable. * -DTOOL enables the code for the tool during compilation * -DCODE enables the code for the executable during compilation */ #ifdef CODE #include "omp.h" int main() { #pragma omp parallel num_threads(2) { } // Check if libomp supports the callbacks for this test. 
// CHECK-NOT: {{^}}0: Could not register callback // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]] // CHECK: {{^}}0: ompt_event_runtime_shutdown return 0; } #endif /* CODE */ #ifdef TOOL #include <stdio.h> #include <omp-tools.h> int ompt_initialize( ompt_function_lookup_t lookup, ompt_data_t* tool_data) { printf("0: NULL_POINTER=%p\n", (void*)NULL); return 1; //success } void ompt_finalize(ompt_data_t* tool_data) { printf("0: ompt_event_runtime_shutdown\n"); } ompt_start_tool_result_t* ompt_start_tool( unsigned int omp_version, const char *runtime_version) { static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0}; return &ompt_start_tool_result; } #endif /* TOOL */
ASTMatchers.h
//===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements matchers to be used together with the MatchFinder to // match AST nodes. // // Matchers are created by generator functions, which can be combined in // a functional in-language DSL to express queries over the C++ AST. // // For example, to match a class with a certain name, one would call: // cxxRecordDecl(hasName("MyClass")) // which returns a matcher that can be used to find all AST nodes that declare // a class named 'MyClass'. // // For more complicated match expressions we're often interested in accessing // multiple parts of the matched AST nodes once a match is found. In that case, // call `.bind("name")` on match expressions that match the nodes you want to // access. // // For example, when we're interested in child classes of a certain class, we // would write: // cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child"))) // When the match is found via the MatchFinder, a user provided callback will // be called with a BoundNodes instance that contains a mapping from the // strings that we provided for the `.bind()` calls to the nodes that were // matched. // In the given example, each time our matcher finds a match we get a callback // where "child" is bound to the RecordDecl node of the matching child // class declaration. // // See ASTMatchersInternal.h for a more in-depth explanation of the // implementation details of the matcher framework. // // See ASTMatchFinder.h for how to use the generated matchers to run over // an AST. 
// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #include "clang/AST/ASTContext.h" #include "clang/AST/ASTTypeTraits.h" #include "clang/AST/Attr.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclFriend.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/LambdaCapture.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/OperationKinds.h" #include "clang/AST/ParentMapContext.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" #include "clang/ASTMatchers/ASTMatchersInternal.h" #include "clang/ASTMatchers/ASTMatchersMacros.h" #include "clang/Basic/AttrKinds.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TypeTraits.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Regex.h" #include <cassert> #include <cstddef> #include <iterator> #include <limits> #include <string> #include <utility> #include <vector> namespace clang { namespace ast_matchers { /// Maps string IDs to AST nodes matched by parts of a matcher. 
///
/// The bound nodes are generated by calling \c bind("id") on the node matchers
/// of the nodes we want to access later.
///
/// The instances of BoundNodes are created by \c MatchFinder when the user's
/// callbacks are executed every time a match is found.
class BoundNodes {
public:
  /// Returns the AST node bound to \c ID.
  ///
  /// Returns NULL if there was no node bound to \c ID or if there is a node but
  /// it cannot be converted to the specified type.
  template <typename T> const T *getNodeAs(StringRef ID) const {
    return MyBoundNodes.getNodeAs<T>(ID);
  }

  /// Type of mapping from binding identifiers to bound nodes. This type
  /// is an associative container with a key type of \c std::string and a value
  /// type of \c clang::DynTypedNode.
  using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap;

  /// Retrieve mapping from binding identifiers to bound nodes.
  const IDToNodeMap &getMap() const {
    return MyBoundNodes.getMap();
  }

private:
  friend class internal::BoundNodesTreeBuilder;

  /// Create BoundNodes from a pre-filled map of bindings.
  BoundNodes(internal::BoundNodesMap &MyBoundNodes)
      : MyBoundNodes(MyBoundNodes) {}

  internal::BoundNodesMap MyBoundNodes;
};

/// Types of matchers for the top-level classes in the AST class
/// hierarchy.
/// @{
using DeclarationMatcher = internal::Matcher<Decl>;
using StatementMatcher = internal::Matcher<Stmt>;
using TypeMatcher = internal::Matcher<QualType>;
using TypeLocMatcher = internal::Matcher<TypeLoc>;
using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>;
using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>;
using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>;
using TemplateArgumentLocMatcher = internal::Matcher<TemplateArgumentLoc>;
/// @}

/// Matches any node.
///
/// Useful when another matcher requires a child matcher, but there's no
/// additional constraint. This will often be used with an explicit conversion
/// to an \c internal::Matcher<> type such as \c TypeMatcher.
///
/// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g.,
/// \code
/// "int* p" and "void f()" in
///   int* p;
///   void f();
/// \endcode
///
/// Usable as: Any Matcher
inline internal::TrueMatcher anything() { return internal::TrueMatcher(); }

/// Matches the top declaration context.
///
/// Given
/// \code
///   int X;
///   namespace NS {
///   int Y;
///   }  // namespace NS
/// \endcode
/// decl(hasDeclContext(translationUnitDecl()))
///   matches "int X", but not "int Y".
extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl>
    translationUnitDecl;

/// Matches typedef declarations.
///
/// Given
/// \code
///   typedef int X;
///   using Y = int;
/// \endcode
/// typedefDecl()
///   matches "typedef int X", but not "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl>
    typedefDecl;

/// Matches typedef name declarations.
///
/// Given
/// \code
///   typedef int X;
///   using Y = int;
/// \endcode
/// typedefNameDecl()
///   matches "typedef int X" and "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
    typedefNameDecl;

/// Matches type alias declarations.
///
/// Given
/// \code
///   typedef int X;
///   using Y = int;
/// \endcode
/// typeAliasDecl()
///   matches "using Y = int", but not "typedef int X"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl>
    typeAliasDecl;

/// Matches type alias template declarations.
///
/// typeAliasTemplateDecl() matches
/// \code
///   template <typename T>
///   using Y = X<T>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
    typeAliasTemplateDecl;

/// Matches AST nodes that were expanded within the main-file.
/// /// Example matches X but not Y /// (matcher = cxxRecordDecl(isExpansionInMainFile()) /// \code /// #include <Y.h> /// class X {}; /// \endcode /// Y.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInMainFile, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); return SourceManager.isInMainFile( SourceManager.getExpansionLoc(Node.getBeginLoc())); } /// Matches AST nodes that were expanded within system-header-files. /// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInSystemHeader()) /// \code /// #include <SystemHeader.h> /// class X {}; /// \endcode /// SystemHeader.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } return SourceManager.isInSystemHeader(ExpansionLoc); } /// Matches AST nodes that were expanded within files whose name is /// partially matching a given regex. 
/// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")) /// \code /// #include "ASTMatcher.h" /// class X {}; /// \endcode /// ASTMatcher.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER_REGEX(isExpansionInFileMatching, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), RegExp) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } auto FileEntry = SourceManager.getFileEntryForID(SourceManager.getFileID(ExpansionLoc)); if (!FileEntry) { return false; } auto Filename = FileEntry->getName(); return RegExp->match(Filename); } /// Matches statements that are (transitively) expanded from the named macro. /// Does not match if only part of the statement is expanded from that macro or /// if different parts of the the statement are expanded from different /// appearances of the macro. /// /// FIXME: Change to be a polymorphic matcher that works on any syntactic /// node. There's nothing `Stmt`-specific about it. AST_MATCHER_P(Stmt, isExpandedFromMacro, llvm::StringRef, MacroName) { // Verifies that the statement' beginning and ending are both expanded from // the same instance of the given macro. auto& Context = Finder->getASTContext(); llvm::Optional<SourceLocation> B = internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Context); if (!B) return false; llvm::Optional<SourceLocation> E = internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Context); if (!E) return false; return *B == *E; } /// Matches declarations. /// /// Examples matches \c X, \c C, and the friend declaration inside \c C; /// \code /// void X(); /// class C { /// friend X; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<Decl> decl; /// Matches decomposition-declarations. 
/// /// Examples matches the declaration node with \c foo and \c bar, but not /// \c number. /// (matcher = declStmt(has(decompositionDecl()))) /// /// \code /// int number = 42; /// auto [foo, bar] = std::make_pair{42, 42}; /// \endcode extern const internal::VariadicAllOfMatcher<DecompositionDecl> decompositionDecl; /// Matches a declaration of a linkage specification. /// /// Given /// \code /// extern "C" {} /// \endcode /// linkageSpecDecl() /// matches "extern "C" {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl> linkageSpecDecl; /// Matches a declaration of anything that could have a name. /// /// Example matches \c X, \c S, the anonymous union type, \c i, and \c U; /// \code /// typedef int X; /// struct S { /// union { /// int i; /// } U; /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl; /// Matches a declaration of label. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelDecl() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl; /// Matches a declaration of a namespace. /// /// Given /// \code /// namespace {} /// namespace test {} /// \endcode /// namespaceDecl() /// matches "namespace {}" and "namespace test {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl> namespaceDecl; /// Matches a declaration of a namespace alias. /// /// Given /// \code /// namespace test {} /// namespace alias = ::test; /// \endcode /// namespaceAliasDecl() /// matches "namespace alias" but not "namespace test" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl> namespaceAliasDecl; /// Matches class, struct, and union declarations. 
/// /// Example matches \c X, \c Z, \c U, and \c S /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl; /// Matches C++ class declarations. /// /// Example matches \c X, \c Z /// \code /// class X; /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl> cxxRecordDecl; /// Matches C++ class template declarations. /// /// Example matches \c Z /// \code /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl> classTemplateDecl; /// Matches C++ class template specializations. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// \endcode /// classTemplateSpecializationDecl() /// matches the specializations \c A<int> and \c A<double> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplateSpecializationDecl> classTemplateSpecializationDecl; /// Matches C++ class template partial specializations. /// /// Given /// \code /// template<class T1, class T2, int I> /// class A {}; /// /// template<class T, int I> /// class A<T, T*, I> {}; /// /// template<> /// class A<int, int, 1> {}; /// \endcode /// classTemplatePartialSpecializationDecl() /// matches the specialization \c A<T,T*,I> but not \c A<int,int,1> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplatePartialSpecializationDecl> classTemplatePartialSpecializationDecl; /// Matches declarator declarations (field, variable, function /// and non-type template parameter declarations). /// /// Given /// \code /// class X { int y; }; /// \endcode /// declaratorDecl() /// matches \c int y. extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl> declaratorDecl; /// Matches parameter variable declarations. 
/// /// Given /// \code /// void f(int x); /// \endcode /// parmVarDecl() /// matches \c int x. extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl> parmVarDecl; /// Matches C++ access specifier declarations. /// /// Given /// \code /// class C { /// public: /// int a; /// }; /// \endcode /// accessSpecDecl() /// matches 'public:' extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl> accessSpecDecl; /// Matches constructor initializers. /// /// Examples matches \c i(42). /// \code /// class C { /// C() : i(42) {} /// int i; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<CXXCtorInitializer> cxxCtorInitializer; /// Matches template arguments. /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgument() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument; /// Matches template arguments (with location info). /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgumentLoc() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgumentLoc> templateArgumentLoc; /// Matches template name. /// /// Given /// \code /// template <typename T> class X { }; /// X<int> xi; /// \endcode /// templateName() /// matches 'X' in X<int>. extern const internal::VariadicAllOfMatcher<TemplateName> templateName; /// Matches non-type template parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// nonTypeTemplateParmDecl() /// matches 'N', but not 'T'. extern const internal::VariadicDynCastAllOfMatcher<Decl, NonTypeTemplateParmDecl> nonTypeTemplateParmDecl; /// Matches template type parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// templateTypeParmDecl() /// matches 'T', but not 'N'. 
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
    templateTypeParmDecl;

/// Matches template template parameter declarations.
///
/// Given
/// \code
///   template <template <typename> class Z, int N> struct C {};
/// \endcode
/// templateTemplateParmDecl()
///   matches 'Z', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
                                                   TemplateTemplateParmDecl>
    templateTemplateParmDecl;

/// Matches public C++ declarations and C++ base specifiers that specify public
/// inheritance.
///
/// Examples:
/// \code
///   class C {
///   public:    int a; // fieldDecl(isPublic()) matches 'a'
///   protected: int b;
///   private:   int c;
///   };
/// \endcode
///
/// \code
///   class Base {};
///   class Derived1 : public Base {}; // matches 'Base'
///   struct Derived2 : Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isPublic,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  return getAccessSpecifier(Node) == AS_public;
}

/// Matches protected C++ declarations and C++ base specifiers that specify
/// protected inheritance.
///
/// Examples:
/// \code
///   class C {
///   public:    int a;
///   protected: int b; // fieldDecl(isProtected()) matches 'b'
///   private:   int c;
///   };
/// \endcode
///
/// \code
///   class Base {};
///   class Derived : protected Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isProtected,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  return getAccessSpecifier(Node) == AS_protected;
}

/// Matches private C++ declarations and C++ base specifiers that specify
/// private inheritance.
///
/// Examples:
/// \code
///   class C {
///   public:    int a;
///   protected: int b;
///   private:   int c; // fieldDecl(isPrivate()) matches 'c'
///   };
/// \endcode
///
/// \code
///   struct Base {};
///   struct Derived1 : private Base {}; // matches 'Base'
///   class Derived2 : Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isPrivate,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  return getAccessSpecifier(Node) == AS_private;
}

/// Matches non-static data members that are bit-fields.
///
/// Given
/// \code
///   class C {
///     int a : 2;
///     int b;
///   };
/// \endcode
/// fieldDecl(isBitField())
///   matches 'int a;' but not 'int b;'.
AST_MATCHER(FieldDecl, isBitField) {
  return Node.isBitField();
}

/// Matches non-static data members that are bit-fields of the specified
/// bit width.
///
/// Given
/// \code
///   class C {
///     int a : 2;
///     int b : 4;
///     int c : 2;
///   };
/// \endcode
/// fieldDecl(hasBitWidth(2))
///   matches 'int a;' and 'int c;' but not 'int b;'.
AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) {
  return Node.isBitField() &&
         Node.getBitWidthValue(Finder->getASTContext()) == Width;
}

/// Matches non-static data members that have an in-class initializer.
///
/// Given
/// \code
///   class C {
///     int a = 2;
///     int b = 3;
///     int c;
///   };
/// \endcode
/// fieldDecl(hasInClassInitializer(integerLiteral(equals(2))))
///   matches 'int a;' but not 'int b;'.
/// fieldDecl(hasInClassInitializer(anything()))
///   matches 'int a;' and 'int b;' but not 'int c;'.
AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *Initializer = Node.getInClassInitializer();
  return (Initializer != nullptr &&
          InnerMatcher.matches(*Initializer, Finder, Builder));
}

/// Determines whether the function is "main", which is the entry point
/// into an executable program.
AST_MATCHER(FunctionDecl, isMain) {
  return Node.isMain();
}

/// Matches the specialized template of a specialization declaration.
///
/// Given
/// \code
///   template<typename T> class A {}; #1
///   template<> class A<int> {}; #2
/// \endcode
/// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl()))
///   matches '#2' with classTemplateDecl() matching the class template
///   declaration of 'A' at #1.
AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate,
              internal::Matcher<ClassTemplateDecl>, InnerMatcher) {
  const ClassTemplateDecl* Decl = Node.getSpecializedTemplate();
  return (Decl != nullptr &&
          InnerMatcher.matches(*Decl, Finder, Builder));
}

/// Matches a declaration that has been implicitly added
/// by the compiler (eg. implicit default/copy constructors).
AST_MATCHER(Decl, isImplicit) {
  return Node.isImplicit();
}

/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
///   template<typename T> class A {};
///   template<> class A<double> {};
///   A<int> a;
///
///   template<typename T> void f() {};
///   void func() { f<int>(); };
/// \endcode
///
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
///     refersToType(asString("int"))))
///   matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
///   matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
    hasAnyTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    internal::Matcher<TemplateArgument>, InnerMatcher) {
  ArrayRef<TemplateArgument> List =
      internal::getTemplateSpecializationArgs(Node);
  return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder,
                             Builder);
}

/// Causes all nested matchers to be matched with the specified traversal kind.
/// /// Given /// \code /// void foo() /// { /// int i = 3.0; /// } /// \endcode /// The matcher /// \code /// traverse(TK_IgnoreImplicitCastsAndParentheses, /// varDecl(hasInitializer(floatLiteral().bind("init"))) /// ) /// \endcode /// matches the variable declaration with "init" bound to the "3.0". template <typename T> internal::Matcher<T> traverse(TraversalKind TK, const internal::Matcher<T> &InnerMatcher) { return internal::DynTypedMatcher::constructRestrictedWrapper( new internal::TraversalMatcher<T>(TK, InnerMatcher), InnerMatcher.getID().first) .template unconditionalConvertTo<T>(); } template <typename T> internal::BindableMatcher<T> traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) { return internal::BindableMatcher<T>( internal::DynTypedMatcher::constructRestrictedWrapper( new internal::TraversalMatcher<T>(TK, InnerMatcher), InnerMatcher.getID().first) .template unconditionalConvertTo<T>()); } template <typename... T> internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>> traverse(TraversalKind TK, const internal::VariadicOperatorMatcher<T...> &InnerMatcher) { return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>( TK, InnerMatcher); } template <template <typename ToArg, typename FromArg> class ArgumentAdapterT, typename T, typename ToTypes> internal::TraversalWrapper< internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>> traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor< ArgumentAdapterT, T, ToTypes> &InnerMatcher) { return internal::TraversalWrapper< internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>(TK, InnerMatcher); } template <template <typename T, typename P1> class MatcherT, typename P1, typename ReturnTypesF> internal::TraversalWrapper< internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>> traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam1< MatcherT, P1, ReturnTypesF> 
&InnerMatcher) { return internal::TraversalWrapper< internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>( TK, InnerMatcher); } template <template <typename T, typename P1, typename P2> class MatcherT, typename P1, typename P2, typename ReturnTypesF> internal::TraversalWrapper< internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>> traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam2< MatcherT, P1, P2, ReturnTypesF> &InnerMatcher) { return internal::TraversalWrapper< internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>( TK, InnerMatcher); } /// Matches expressions that match InnerMatcher after any implicit AST /// nodes are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// class C {}; /// C a = C(); /// C b; /// C c = b; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr()))) /// \endcode /// would match the declarations for a, b, and c. /// While /// \code /// varDecl(hasInitializer(cxxConstructExpr())) /// \endcode /// only match the declarations for b and c. AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImplicit(), Finder, Builder); } /// Matches expressions that match InnerMatcher after any implicit casts /// are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = 0; /// const int c = a; /// int *d = arr; /// long e = (long) 0l; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringImpCasts(declRefExpr()))) /// \endcode /// would match the declarations for a, b, c, and d, but not e. /// While /// \code /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// \endcode /// only match the declarations for b, c, and d. 
AST_MATCHER_P(Expr, ignoringImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after parentheses and /// casts are stripped off. /// /// Implicit and non-C Style casts are also discarded. /// Given /// \code /// int a = 0; /// char b = (0); /// void* c = reinterpret_cast<char*>(0); /// char d = char(0); /// \endcode /// The matcher /// varDecl(hasInitializer(ignoringParenCasts(integerLiteral()))) /// would match the declarations for a, b, c, and d. /// while /// varDecl(hasInitializer(integerLiteral())) /// only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after implicit casts and /// parentheses are stripped off. /// /// Explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = (0); /// const int c = a; /// int *d = (arr); /// long e = ((long) 0l); /// \endcode /// The matchers /// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr()))) /// would match the declarations for a, b, c, and d, but not e. /// while /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// would only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder); } /// Matches types that match InnerMatcher after any parens are stripped. /// /// Given /// \code /// void (*fp)(void); /// \endcode /// The matcher /// \code /// varDecl(hasType(pointerType(pointee(ignoringParens(functionType()))))) /// \endcode /// would match the declaration for fp. 
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>,
                       InnerMatcher, 0) {
  return InnerMatcher.matches(Node.IgnoreParens(), Finder, Builder);
}

/// Overload \c ignoringParens for \c Expr.
///
/// Given
/// \code
///   const char* str = ("my-string");
/// \endcode
/// The matcher
/// \code
///   implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral())))
/// \endcode
/// would match the implicit cast resulting from the assignment.
AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>,
                       InnerMatcher, 1) {
  const Expr *E = Node.IgnoreParens();
  return InnerMatcher.matches(*E, Finder, Builder);
}

/// Matches expressions that are instantiation-dependent even if it is
/// neither type- nor value-dependent.
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
///   template<typename T>
///   void f(T x, T y) { sizeof(sizeof(T() + T())); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()))
AST_MATCHER(Expr, isInstantiationDependent) {
  return Node.isInstantiationDependent();
}

/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
///   template<typename T>
///   void add(T x, int y) {
///     x + y;
///   }
/// \endcode
/// expr(isTypeDependent()) matches x + y
AST_MATCHER(Expr, isTypeDependent) {
  return Node.isTypeDependent();
}

/// Matches expressions that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the array bound of "Chars" in the following example is
/// value-dependent.
/// \code /// template<int Size> int f() { return Size; } /// \endcode /// expr(isValueDependent()) matches return Size AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl where the n'th TemplateArgument matches the given InnerMatcher. /// /// Given /// \code /// template<typename T, typename U> class A {}; /// A<bool, int> b; /// A<int, bool> c; /// /// template<typename T> void f() {} /// void func() { f<int>(); }; /// \endcode /// classTemplateSpecializationDecl(hasTemplateArgument( /// 1, refersToType(asString("int")))) /// matches the specialization \c A<bool, int> /// /// functionDecl(hasTemplateArgument(0, refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P2( hasTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); if (List.size() <= N) return false; return InnerMatcher.matches(List[N], Finder, Builder); } /// Matches if the number of template arguments equals \p N. /// /// Given /// \code /// template<typename T> struct C {}; /// C<int> c; /// \endcode /// classTemplateSpecializationDecl(templateArgumentCountIs(1)) /// matches C<int>. AST_POLYMORPHIC_MATCHER_P( templateArgumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType), unsigned, N) { return internal::getTemplateSpecializationArgs(Node).size() == N; } /// Matches a TemplateArgument that refers to a certain type. 
/// /// Given /// \code /// struct X {}; /// template<typename T> struct A {}; /// A<X> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(class(hasName("X"))))) /// matches the specialization \c A<X> AST_MATCHER_P(TemplateArgument, refersToType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Type) return false; return InnerMatcher.matches(Node.getAsType(), Finder, Builder); } /// Matches a TemplateArgument that refers to a certain template. /// /// Given /// \code /// template<template <typename> class S> class X {}; /// template<typename T> class Y {}; /// X<Y> xi; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToTemplate(templateName()))) /// matches the specialization \c X<Y> AST_MATCHER_P(TemplateArgument, refersToTemplate, internal::Matcher<TemplateName>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Template) return false; return InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder); } /// Matches a canonical TemplateArgument that refers to a certain /// declaration. /// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToDeclaration(fieldDecl(hasName("next"))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, refersToDeclaration, internal::Matcher<Decl>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Declaration) return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder); return false; } /// Matches a sugar TemplateArgument that refers to a certain expression. 
/// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// templateSpecializationType(hasAnyTemplateArgument( /// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next")))))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Expression) return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder); return false; } /// Matches a TemplateArgument that is an integral value. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(isIntegral())) /// matches the implicit instantiation of C in C<42> /// with isIntegral() matching 42. AST_MATCHER(TemplateArgument, isIntegral) { return Node.getKind() == TemplateArgument::Integral; } /// Matches a TemplateArgument that refers to an integral type. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(refersToIntegralType(asString("int")))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, refersToIntegralType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Integral) return false; return InnerMatcher.matches(Node.getIntegralType(), Finder, Builder); } /// Matches a TemplateArgument of integral type with a given value. /// /// Note that 'Value' is a string as the template argument's value is /// an arbitrary precision integer. 'Value' must be euqal to the canonical /// representation of that integral value in base 10. 
/// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(equalsIntegralValue("42"))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, equalsIntegralValue, std::string, Value) { if (Node.getKind() != TemplateArgument::Integral) return false; return Node.getAsIntegral().toString(10) == Value; } /// Matches an Objective-C autorelease pool statement. /// /// Given /// \code /// @autoreleasepool { /// int x = 0; /// } /// \endcode /// autoreleasePoolStmt(stmt()) matches the declaration of "x" /// inside the autorelease pool. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAutoreleasePoolStmt> autoreleasePoolStmt; /// Matches any value declaration. /// /// Example matches A, B, C and F /// \code /// enum X { A, B, C }; /// void F(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl; /// Matches C++ constructor declarations. /// /// Example matches Foo::Foo() and Foo::Foo(int) /// \code /// class Foo { /// public: /// Foo(); /// Foo(int); /// int DoSomething(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl> cxxConstructorDecl; /// Matches explicit C++ destructor declarations. /// /// Example matches Foo::~Foo() /// \code /// class Foo { /// public: /// virtual ~Foo(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl> cxxDestructorDecl; /// Matches enum declarations. /// /// Example matches X /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl; /// Matches enum constants. /// /// Example matches A, B, C /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl> enumConstantDecl; /// Matches tag declarations. 
/// /// Example matches X, Z, U, S, E /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// enum E { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl; /// Matches method declarations. /// /// Example matches y /// \code /// class X { void y(); }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl> cxxMethodDecl; /// Matches conversion operator declarations. /// /// Example matches the operator. /// \code /// class X { operator int() const; }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl> cxxConversionDecl; /// Matches user-defined and implicitly generated deduction guide. /// /// Example matches the deduction guide. /// \code /// template<typename T> /// class X { X(int) }; /// X(int) -> X<int>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl> cxxDeductionGuideDecl; /// Matches variable declarations. /// /// Note: this does not match declarations of member variables, which are /// "field" declarations in Clang parlance. /// /// Example matches a /// \code /// int a; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl; /// Matches field declarations. /// /// Given /// \code /// class X { int m; }; /// \endcode /// fieldDecl() /// matches 'm'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl; /// Matches indirect field declarations. /// /// Given /// \code /// struct X { struct { int a; }; }; /// \endcode /// indirectFieldDecl() /// matches 'a'. extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl> indirectFieldDecl; /// Matches function declarations. /// /// Example matches f /// \code /// void f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl> functionDecl; /// Matches C++ function template declarations. 
/// /// Example matches f /// \code /// template<class T> void f(T t) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl> functionTemplateDecl; /// Matches friend declarations. /// /// Given /// \code /// class X { friend void foo(); }; /// \endcode /// friendDecl() /// matches 'friend void foo()'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl; /// Matches statements. /// /// Given /// \code /// { ++a; } /// \endcode /// stmt() /// matches both the compound statement '{ ++a; }' and '++a'. extern const internal::VariadicAllOfMatcher<Stmt> stmt; /// Matches declaration statements. /// /// Given /// \code /// int a; /// \endcode /// declStmt() /// matches 'int a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt; /// Matches member expressions. /// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// int a; static int b; /// }; /// \endcode /// memberExpr() /// matches this->x, x, y.x, a, this->b extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr; /// Matches unresolved member expressions. /// /// Given /// \code /// struct X { /// template <class T> void f(); /// void g(); /// }; /// template <class T> void h() { X x; x.f<T>(); x.g(); } /// \endcode /// unresolvedMemberExpr() /// matches x.f<T> extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr> unresolvedMemberExpr; /// Matches member expressions where the actual member referenced could not be /// resolved because the base expression or the member name was dependent. /// /// Given /// \code /// template <class T> void f() { T t; t.g(); } /// \endcode /// cxxDependentScopeMemberExpr() /// matches t.g extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDependentScopeMemberExpr> cxxDependentScopeMemberExpr; /// Matches call expressions. 
/// /// Example matches x.y() and y() /// \code /// X x; /// x.y(); /// y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr; /// Matches call expressions which were resolved using ADL. /// /// Example matches y(x) but not y(42) or NS::y(x). /// \code /// namespace NS { /// struct X {}; /// void y(X); /// } /// /// void y(...); /// /// void test() { /// NS::X x; /// y(x); // Matches /// NS::y(x); // Doesn't match /// y(42); // Doesn't match /// using NS::y; /// y(x); // Found by both unqualified lookup and ADL, doesn't match // } /// \endcode AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); } /// Matches lambda expressions. /// /// Example matches [&](){return 5;} /// \code /// [&](){return 5;} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr; /// Matches member call expressions. /// /// Example matches x.y() /// \code /// X x; /// x.y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr> cxxMemberCallExpr; /// Matches ObjectiveC Message invocation expressions. /// /// The innermost message send invokes the "alloc" class method on the /// NSString class, while the outermost message send invokes the /// "initWithString" instance method on the object returned from /// NSString's "alloc". This matcher should match both message sends. /// \code /// [[NSString alloc] initWithString:@"Hello"] /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr> objcMessageExpr; /// Matches Objective-C interface declarations. /// /// Example matches Foo /// \code /// @interface Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl> objcInterfaceDecl; /// Matches Objective-C implementation declarations. 
/// /// Example matches Foo /// \code /// @implementation Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl> objcImplementationDecl; /// Matches Objective-C protocol declarations. /// /// Example matches FooDelegate /// \code /// @protocol FooDelegate /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl> objcProtocolDecl; /// Matches Objective-C category declarations. /// /// Example matches Foo (Additions) /// \code /// @interface Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl> objcCategoryDecl; /// Matches Objective-C category definitions. /// /// Example matches Foo (Additions) /// \code /// @implementation Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl> objcCategoryImplDecl; /// Matches Objective-C method declarations. /// /// Example matches both declaration and definition of -[Foo method] /// \code /// @interface Foo /// - (void)method; /// @end /// /// @implementation Foo /// - (void)method {} /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl> objcMethodDecl; /// Matches block declarations. /// /// Example matches the declaration of the nameless block printing an input /// integer. /// /// \code /// myFunc(^(int p) { /// printf("%d", p); /// }) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl> blockDecl; /// Matches Objective-C instance variable declarations. /// /// Example matches _enabled /// \code /// @implementation Foo { /// BOOL _enabled; /// } /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl> objcIvarDecl; /// Matches Objective-C property declarations. 
/// /// Example matches enabled /// \code /// @interface Foo /// @property BOOL enabled; /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl> objcPropertyDecl; /// Matches Objective-C \@throw statements. /// /// Example matches \@throw /// \code /// @throw obj; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt> objcThrowStmt; /// Matches Objective-C @try statements. /// /// Example matches @try /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt> objcTryStmt; /// Matches Objective-C @catch statements. /// /// Example matches @catch /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt> objcCatchStmt; /// Matches Objective-C @finally statements. /// /// Example matches @finally /// \code /// @try {} /// @finally {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt> objcFinallyStmt; /// Matches expressions that introduce cleanups to be run at the end /// of the sub-expression's evaluation. /// /// Example matches std::string() /// \code /// const std::string str = std::string(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups> exprWithCleanups; /// Matches init list expressions. /// /// Given /// \code /// int a[] = { 1, 2 }; /// struct B { int x, y; }; /// B b = { 5, 6 }; /// \endcode /// initListExpr() /// matches "{ 1, 2 }" and "{ 5, 6 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr> initListExpr; /// Matches the syntactic form of init list expressions /// (if expression have it). AST_MATCHER_P(InitListExpr, hasSyntacticForm, internal::Matcher<Expr>, InnerMatcher) { const Expr *SyntForm = Node.getSyntacticForm(); return (SyntForm != nullptr && InnerMatcher.matches(*SyntForm, Finder, Builder)); } /// Matches C++ initializer list expressions. 
/// /// Given /// \code /// std::vector<int> a({ 1, 2, 3 }); /// std::vector<int> b = { 4, 5 }; /// int c[] = { 6, 7 }; /// std::pair<int, int> d = { 8, 9 }; /// \endcode /// cxxStdInitializerListExpr() /// matches "{ 1, 2, 3 }" and "{ 4, 5 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStdInitializerListExpr> cxxStdInitializerListExpr; /// Matches implicit initializers of init list expressions. /// /// Given /// \code /// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 }; /// \endcode /// implicitValueInitExpr() /// matches "[0].y" (implicitly) extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr> implicitValueInitExpr; /// Matches paren list expressions. /// ParenListExprs don't have a predefined type and are used for late parsing. /// In the final AST, they can be met in template declarations. /// /// Given /// \code /// template<typename T> class X { /// void f() { /// X x(*this); /// int a = 0, b = 1; int i = (a, b); /// } /// }; /// \endcode /// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b) /// has a predefined type and is a ParenExpr, not a ParenListExpr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr> parenListExpr; /// Matches substitutions of non-type template parameters. /// /// Given /// \code /// template <int N> /// struct A { static const int n = N; }; /// struct B : public A<42> {}; /// \endcode /// substNonTypeTemplateParmExpr() /// matches "N" in the right-hand side of "static const int n = N;" extern const internal::VariadicDynCastAllOfMatcher<Stmt, SubstNonTypeTemplateParmExpr> substNonTypeTemplateParmExpr; /// Matches using declarations. /// /// Given /// \code /// namespace X { int x; } /// using X::x; /// \endcode /// usingDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl; /// Matches using namespace declarations. 
/// /// Given /// \code /// namespace X { int x; } /// using namespace X; /// \endcode /// usingDirectiveDecl() /// matches \code using namespace X \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl> usingDirectiveDecl; /// Matches reference to a name that can be looked up during parsing /// but could not be resolved to a specific declaration. /// /// Given /// \code /// template<typename T> /// T foo() { T a; return a; } /// template<typename T> /// void bar() { /// foo<T>(); /// } /// \endcode /// unresolvedLookupExpr() /// matches \code foo<T>() \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr> unresolvedLookupExpr; /// Matches unresolved using value declarations. /// /// Given /// \code /// template<typename X> /// class C : private X { /// using X::x; /// }; /// \endcode /// unresolvedUsingValueDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingValueDecl> unresolvedUsingValueDecl; /// Matches unresolved using value declarations that involve the /// typename. /// /// Given /// \code /// template <typename T> /// struct Base { typedef T Foo; }; /// /// template<typename T> /// struct S : private Base<T> { /// using typename Base<T>::Foo; /// }; /// \endcode /// unresolvedUsingTypenameDecl() /// matches \code using Base<T>::Foo \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingTypenameDecl> unresolvedUsingTypenameDecl; /// Matches a constant expression wrapper. /// /// Example matches the constant in the case statement: /// (matcher = constantExpr()) /// \code /// switch (a) { /// case 37: break; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr> constantExpr; /// Matches parentheses used in expressions. 
/// /// Example matches (foo() + 1) /// \code /// int foo() { return 1; } /// int a = (foo() + 1); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr; /// Matches constructor call expressions (including implicit ones). /// /// Example matches string(ptr, n) and ptr within arguments of f /// (matcher = cxxConstructExpr()) /// \code /// void f(const string &a, const string &b); /// char *ptr; /// int n; /// f(string(ptr, n), ptr); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr> cxxConstructExpr; /// Matches unresolved constructor call expressions. /// /// Example matches T(t) in return statement of f /// (matcher = cxxUnresolvedConstructExpr()) /// \code /// template <typename T> /// void f(const T& t) { return T(t); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXUnresolvedConstructExpr> cxxUnresolvedConstructExpr; /// Matches implicit and explicit this expressions. /// /// Example matches the implicit this expression in "return i". /// (matcher = cxxThisExpr()) /// \code /// struct foo { /// int i; /// int f() { return i; } /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr> cxxThisExpr; /// Matches nodes where temporaries are created. /// /// Example matches FunctionTakesString(GetStringByValue()) /// (matcher = cxxBindTemporaryExpr()) /// \code /// FunctionTakesString(GetStringByValue()); /// FunctionTakesStringByPointer(GetStringPointer()); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr> cxxBindTemporaryExpr; /// Matches nodes where temporaries are materialized. 
/// /// Example: Given /// \code /// struct T {void func();}; /// T f(); /// void g(T); /// \endcode /// materializeTemporaryExpr() matches 'f()' in these statements /// \code /// T u(f()); /// g(f()); /// f().func(); /// \endcode /// but does not match /// \code /// f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, MaterializeTemporaryExpr> materializeTemporaryExpr; /// Matches new expressions. /// /// Given /// \code /// new X; /// \endcode /// cxxNewExpr() /// matches 'new X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr; /// Matches delete expressions. /// /// Given /// \code /// delete X; /// \endcode /// cxxDeleteExpr() /// matches 'delete X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr> cxxDeleteExpr; /// Matches noexcept expressions. /// /// Given /// \code /// bool a() noexcept; /// bool b() noexcept(true); /// bool c() noexcept(false); /// bool d() noexcept(noexcept(a())); /// bool e = noexcept(b()) || noexcept(c()); /// \endcode /// cxxNoexceptExpr() /// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`. /// doesn't match the noexcept specifier in the declarations a, b, c or d. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr> cxxNoexceptExpr; /// Matches array subscript expressions. /// /// Given /// \code /// int i = a[1]; /// \endcode /// arraySubscriptExpr() /// matches "a[1]" extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr> arraySubscriptExpr; /// Matches the value of a default argument at the call site. /// /// Example matches the CXXDefaultArgExpr placeholder inserted for the /// default value of the second parameter in the call expression f(42) /// (matcher = cxxDefaultArgExpr()) /// \code /// void f(int x, int y = 0); /// f(42); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr> cxxDefaultArgExpr; /// Matches overloaded operator calls. 
/// /// Note that if an operator isn't overloaded, it won't match. Instead, use /// binaryOperator matcher. /// Currently it does not match operators such as new delete. /// FIXME: figure out why these do not match? /// /// Example matches both operator<<((o << b), c) and operator<<(o, b) /// (matcher = cxxOperatorCallExpr()) /// \code /// ostream &operator<< (ostream &out, int i) { }; /// ostream &o; int b = 1, c = 1; /// o << b << c; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr> cxxOperatorCallExpr; /// Matches expressions. /// /// Example matches x() /// \code /// void f() { x(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr; /// Matches expressions that refer to declarations. /// /// Example matches x in if (x) /// \code /// bool x; /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr> declRefExpr; /// Matches a reference to an ObjCIvar. /// /// Example: matches "a" in "init" method: /// \code /// @implementation A { /// NSString *a; /// } /// - (void) init { /// a = @"hello"; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr> objcIvarRefExpr; /// Matches a reference to a block. /// /// Example: matches "^{}": /// \code /// void f() { ^{}(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr; /// Matches if statements. /// /// Example matches 'if (x) {}' /// \code /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt; /// Matches for statements. /// /// Example matches 'for (;;) {}' /// \code /// for (;;) {} /// int i[] = {1, 2, 3}; for (auto a : i); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt; /// Matches the increment statement of a for loop. 
/// /// Example: /// forStmt(hasIncrement(unaryOperator(hasOperatorName("++")))) /// matches '++x' in /// \code /// for (x; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Increment = Node.getInc(); return (Increment != nullptr && InnerMatcher.matches(*Increment, Finder, Builder)); } /// Matches the initialization statement of a for loop. /// /// Example: /// forStmt(hasLoopInit(declStmt())) /// matches 'int x = 0' in /// \code /// for (int x = 0; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Init = Node.getInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches range-based for statements. /// /// cxxForRangeStmt() matches 'for (auto a : i)' /// \code /// int i[] = {1, 2, 3}; for (auto a : i); /// for(int j = 0; j < 5; ++j); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt> cxxForRangeStmt; /// Matches the initialization statement of a for loop. /// /// Example: /// forStmt(hasLoopVariable(anything())) /// matches 'int x' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>, InnerMatcher) { const VarDecl *const Var = Node.getLoopVariable(); return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder)); } /// Matches the range initialization statement of a for loop. /// /// Example: /// forStmt(hasRangeInit(anything())) /// matches 'a' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>, InnerMatcher) { const Expr *const Init = Node.getRangeInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches while statements. /// /// Given /// \code /// while (true) {} /// \endcode /// whileStmt() /// matches 'while (true) {}'. 
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt; /// Matches do statements. /// /// Given /// \code /// do {} while (true); /// \endcode /// doStmt() /// matches 'do {} while(true)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt; /// Matches break statements. /// /// Given /// \code /// while (true) { break; } /// \endcode /// breakStmt() /// matches 'break' extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt; /// Matches continue statements. /// /// Given /// \code /// while (true) { continue; } /// \endcode /// continueStmt() /// matches 'continue' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt> continueStmt; /// Matches return statements. /// /// Given /// \code /// return 1; /// \endcode /// returnStmt() /// matches 'return 1' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt; /// Matches goto statements. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// gotoStmt() /// matches 'goto FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt; /// Matches label statements. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelStmt() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt; /// Matches address of label statements (GNU extension). /// /// Given /// \code /// FOO: bar(); /// void *ptr = &&FOO; /// goto *bar; /// \endcode /// addrLabelExpr() /// matches '&&FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr> addrLabelExpr; /// Matches switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchStmt() /// matches 'switch(a)'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt; /// Matches case and default statements inside switch statements. 
/// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchCase() /// matches 'case 42:' and 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase; /// Matches case statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// caseStmt() /// matches 'case 42:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt; /// Matches default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// defaultStmt() /// matches 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt> defaultStmt; /// Matches compound statements. /// /// Example matches '{}' and '{{}}' in 'for (;;) {{}}' /// \code /// for (;;) {{}} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt> compoundStmt; /// Matches catch statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxCatchStmt() /// matches 'catch(int i)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt> cxxCatchStmt; /// Matches try statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxTryStmt() /// matches 'try {}' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt; /// Matches throw expressions. /// /// \code /// try { throw 5; } catch(int i) {} /// \endcode /// cxxThrowExpr() /// matches 'throw 5' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr> cxxThrowExpr; /// Matches null statements. /// /// \code /// foo();; /// \endcode /// nullStmt() /// matches the second ';' extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt; /// Matches asm statements. 
/// /// \code /// int i = 100; /// __asm("mov al, 2"); /// \endcode /// asmStmt() /// matches '__asm("mov al, 2")' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt; /// Matches bool literals. /// /// Example matches true /// \code /// true /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr> cxxBoolLiteral; /// Matches string literals (also matches wide string literals). /// /// Example matches "abcd", L"abcd" /// \code /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral> stringLiteral; /// Matches character literals (also matches wchar_t). /// /// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral), /// though. /// /// Example matches 'a', L'a' /// \code /// char ch = 'a'; /// wchar_t chw = L'a'; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral> characterLiteral; /// Matches integer literals of all sizes / encodings, e.g. /// 1, 1L, 0x1 and 1U. /// /// Does not match character-encoded integers such as L'a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral> integerLiteral; /// Matches float literals of all sizes / encodings, e.g. /// 1.0, 1.0f, 1.0L and 1e10. /// /// Does not match implicit conversions such as /// \code /// float a = 10; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral> floatLiteral; /// Matches imaginary literals, which are based on integer and floating /// point literals e.g.: 1i, 1.0i extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral> imaginaryLiteral; /// Matches fixed point literals extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral> fixedPointLiteral; /// Matches user defined literal operator call. 
/// /// Example match: "foo"_suffix extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral> userDefinedLiteral; /// Matches compound (i.e. non-scalar) literals /// /// Example match: {1}, (1, 2) /// \code /// int array[4] = {1}; /// vector int myvec = (vector int)(1, 2); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr> compoundLiteralExpr; /// Matches nullptr literal. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr> cxxNullPtrLiteralExpr; /// Matches GNU __builtin_choose_expr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr> chooseExpr; /// Matches GNU __null expression. extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr> gnuNullExpr; /// Matches atomic builtins. /// Example matches __atomic_load_n(ptr, 1) /// \code /// void foo() { int *ptr; __atomic_load_n(ptr, 1); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr; /// Matches statement expression (GNU extension). /// /// Example match: ({ int X = 4; X; }) /// \code /// int C = ({ int X = 4; X; }); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr; /// Matches binary operator expressions. /// /// Example matches a || b /// \code /// !(a || b) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator> binaryOperator; /// Matches unary operator expressions. /// /// Example matches !a /// \code /// !a || b /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator> unaryOperator; /// Matches conditional operator expressions. /// /// Example matches a ? b : c /// \code /// (a ? b : c) + 42 /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator> conditionalOperator; /// Matches binary conditional operator expressions (GNU extension). 
/// /// Example matches a ?: b /// \code /// (a ?: b) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryConditionalOperator> binaryConditionalOperator; /// Matches opaque value expressions. They are used as helpers /// to reference another expressions and can be met /// in BinaryConditionalOperators, for example. /// /// Example matches 'a' /// \code /// (a ?: c) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr> opaqueValueExpr; /// Matches a C++ static_assert declaration. /// /// Example: /// staticAssertExpr() /// matches /// static_assert(sizeof(S) == sizeof(int)) /// in /// \code /// struct S { /// int x; /// }; /// static_assert(sizeof(S) == sizeof(int)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl> staticAssertDecl; /// Matches a reinterpret_cast expression. /// /// Either the source expression or the destination type can be matched /// using has(), but hasDestinationType() is more specific and can be /// more readable. /// /// Example matches reinterpret_cast<char*>(&p) in /// \code /// void* p = reinterpret_cast<char*>(&p); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr> cxxReinterpretCastExpr; /// Matches a C++ static_cast expression. /// /// \see hasDestinationType /// \see reinterpretCast /// /// Example: /// cxxStaticCastExpr() /// matches /// static_cast<long>(8) /// in /// \code /// long eight(static_cast<long>(8)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr> cxxStaticCastExpr; /// Matches a dynamic_cast expression. /// /// Example: /// cxxDynamicCastExpr() /// matches /// dynamic_cast<D*>(&b); /// in /// \code /// struct B { virtual ~B() {} }; struct D : B {}; /// B b; /// D* p = dynamic_cast<D*>(&b); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr> cxxDynamicCastExpr; /// Matches a const_cast expression. 
/// /// Example: Matches const_cast<int*>(&r) in /// \code /// int n = 42; /// const int &r(n); /// int* p = const_cast<int*>(&r); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr> cxxConstCastExpr; /// Matches a C-style cast expression. /// /// Example: Matches (int) 2.2f in /// \code /// int i = (int) 2.2f; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr> cStyleCastExpr; /// Matches explicit cast expressions. /// /// Matches any cast expression written in user code, whether it be a /// C-style cast, a functional-style cast, or a keyword cast. /// /// Does not match implicit conversions. /// /// Note: the name "explicitCast" is chosen to match Clang's terminology, as /// Clang uses the term "cast" to apply to implicit conversions as well as to /// actual cast expressions. /// /// \see hasDestinationType. /// /// Example: matches all five of the casts in /// \code /// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42))))) /// \endcode /// but does not match the implicit conversion in /// \code /// long ell = 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr> explicitCastExpr; /// Matches the implicit cast nodes of Clang's AST. /// /// This matches many different places, including function call return value /// eliding, as well as any type conversions. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr> implicitCastExpr; /// Matches any cast nodes of Clang's AST. 
/// /// Example: castExpr() matches each of the following: /// \code /// (int) 3; /// const_cast<Expr *>(SubExpr); /// char c = 0; /// \endcode /// but does not match /// \code /// int i = (0); /// int k = 0; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr; /// Matches functional cast expressions /// /// Example: Matches Foo(bar); /// \code /// Foo f = bar; /// Foo g = (Foo) bar; /// Foo h = Foo(bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr> cxxFunctionalCastExpr; /// Matches functional cast expressions having N != 1 arguments /// /// Example: Matches Foo(bar, bar) /// \code /// Foo h = Foo(bar, bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr> cxxTemporaryObjectExpr; /// Matches predefined identifier expressions [C99 6.4.2.2]. /// /// Example: Matches __func__ /// \code /// printf("%s", __func__); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr> predefinedExpr; /// Matches C99 designated initializer expressions [C99 6.7.8]. /// /// Example: Matches { [2].y = 1.0, [0].x = 1.0 } /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr> designatedInitExpr; /// Matches designated initializer expressions that contain /// a specific number of designators. /// /// Example: Given /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }; /// \endcode /// designatorCountIs(2) /// matches '{ [2].y = 1.0, [0].x = 1.0 }', /// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'. AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) { return Node.size() == N; } /// Matches \c QualTypes in the clang AST. extern const internal::VariadicAllOfMatcher<QualType> qualType; /// Matches \c Types in the clang AST. 
extern const internal::VariadicAllOfMatcher<Type> type; /// Matches \c TypeLocs in the clang AST. extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc; /// Matches if any of the given matchers matches. /// /// Unlike \c anyOf, \c eachOf will generate a match result for each /// matching submatcher. /// /// For example, in: /// \code /// class A { int a; int b; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")), /// has(fieldDecl(hasName("b")).bind("v")))) /// \endcode /// will generate two results binding "v", the first of which binds /// the field declaration of \c a, the second the field declaration of /// \c b. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> eachOf; /// Matches if any of the given matchers matches. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> anyOf; /// Matches if all given matchers match. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> allOf; /// Matches any node regardless of the submatcher. /// /// However, \c optionally will retain any bindings generated by the submatcher. /// Useful when additional information which may or may not present about a main /// matching node is desired. /// /// For example, in: /// \code /// class Foo { /// int bar; /// } /// \endcode /// The matcher: /// \code /// cxxRecordDecl( /// optionally(has( /// fieldDecl(hasName("bar")).bind("var") /// ))).bind("record") /// \endcode /// will produce a result binding for both "record" and "var". /// The matcher will produce a "record" binding for even if there is no data /// member named "bar" in that class. 
/// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally; /// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL) /// /// Given /// \code /// Foo x = bar; /// int y = sizeof(x) + alignof(x); /// \endcode /// unaryExprOrTypeTraitExpr() /// matches \c sizeof(x) and \c alignof(x) extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryExprOrTypeTraitExpr> unaryExprOrTypeTraitExpr; /// Matches unary expressions that have a specific type of argument. /// /// Given /// \code /// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c); /// \endcode /// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")) /// matches \c sizeof(a) and \c alignof(c) AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType, internal::Matcher<QualType>, InnerMatcher) { const QualType ArgumentType = Node.getTypeOfArgument(); return InnerMatcher.matches(ArgumentType, Finder, Builder); } /// Matches unary expressions of a certain kind. /// /// Given /// \code /// int x; /// int s = sizeof(x) + alignof(x) /// \endcode /// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf)) /// matches \c sizeof(x) /// /// If the matcher is use from clang-query, UnaryExprOrTypeTrait parameter /// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf"). AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) { return Node.getKind() == Kind; } /// Same as unaryExprOrTypeTraitExpr, but only matching /// alignof. inline internal::BindableMatcher<Stmt> alignOfExpr( const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) { return stmt(unaryExprOrTypeTraitExpr( allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)), InnerMatcher))); } /// Same as unaryExprOrTypeTraitExpr, but only matching /// sizeof. 
inline internal::BindableMatcher<Stmt> sizeOfExpr( const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) { return stmt(unaryExprOrTypeTraitExpr( allOf(ofKind(UETT_SizeOf), InnerMatcher))); } /// Matches NamedDecl nodes that have the specified name. /// /// Supports specifying enclosing namespaces or classes by prefixing the name /// with '<enclosing>::'. /// Does not match typedefs of an underlying type with the given name. /// /// Example matches X (Name == "X") /// \code /// class X; /// \endcode /// /// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X") /// \code /// namespace a { namespace b { class X; } } /// \endcode inline internal::Matcher<NamedDecl> hasName(StringRef Name) { return internal::Matcher<NamedDecl>( new internal::HasNameMatcher({std::string(Name)})); } /// Matches NamedDecl nodes that have any of the specified names. /// /// This matcher is only provided as a performance optimization of hasName. /// \code /// hasAnyName(a, b, c) /// \endcode /// is equivalent to, but faster than /// \code /// anyOf(hasName(a), hasName(b), hasName(c)) /// \endcode extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef, internal::hasAnyNameFunc> hasAnyName; /// Matches NamedDecl nodes whose fully qualified names contain /// a substring matched by the given RegExp. /// /// Supports specifying enclosing namespaces or classes by /// prefixing the name with '<enclosing>::'. Does not match typedefs /// of an underlying type with the given name. /// /// Example matches X (regexp == "::X") /// \code /// class X; /// \endcode /// /// Example matches X (regexp is one of "::X", "^foo::.*X", among others) /// \code /// namespace foo { namespace bar { class X; } } /// \endcode AST_MATCHER_REGEX(NamedDecl, matchesName, RegExp) { std::string FullNameString = "::" + Node.getQualifiedNameAsString(); return RegExp->match(FullNameString); } /// Matches overloaded operator names. 
/// /// Matches overloaded operator names specified in strings without the /// "operator" prefix: e.g. "<<". /// /// Given: /// \code /// class A { int operator*(); }; /// const A &operator<<(const A &a, const A &b); /// A a; /// a << a; // <-- This matches /// \endcode /// /// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the /// specified line and /// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*"))) /// matches the declaration of \c A. /// /// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl> inline internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)> hasOverloadedOperatorName(StringRef Name) { return internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>( {std::string(Name)}); } /// Matches overloaded operator names. /// /// Matches overloaded operator names specified in strings without the /// "operator" prefix: e.g. "<<". /// /// hasAnyOverloadesOperatorName("+", "-") /// Is equivalent to /// anyOf(hasOverloadedOperatorName("+"), hasOverloadedOperatorName("-")) extern const internal::VariadicFunction< internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>, StringRef, internal::hasAnyOverloadedOperatorNameFunc> hasAnyOverloadedOperatorName; /// Matches C++ classes that are directly or indirectly derived from a class /// matching \c Base, or Objective-C classes that directly or indirectly /// subclass a class matching \c Base. /// /// Note that a class is not considered to be derived from itself. 
/// /// Example matches Y, Z, C (Base == hasName("X")) /// \code /// class X; /// class Y : public X {}; // directly derived /// class Z : public Y {}; // indirectly derived /// typedef X A; /// typedef A B; /// class C : public B {}; // derived from a typedef of X /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("X")): /// \code /// class Foo; /// typedef Foo X; /// class Bar : public Foo {}; // derived from a type that X is a typedef of /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("NSObject")) /// \code /// @interface NSObject @end /// @interface Bar : NSObject @end /// \endcode /// /// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl> AST_POLYMORPHIC_MATCHER_P( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base) { // Check if the node is a C++ struct/union/class. if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/false); // The node must be an Objective-C class. const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder, /*Directly=*/false); } /// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)). AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches C++ classes that have a direct or indirect base matching \p /// BaseSpecMatcher. 
/// /// Example: /// matcher hasAnyBase(hasType(cxxRecordDecl(hasName("SpecialBase")))) /// \code /// class Foo; /// class Bar : Foo {}; /// class Baz : Bar {}; /// class SpecialBase; /// class Proxy : SpecialBase {}; // matches Proxy /// class IndirectlyDerived : Proxy {}; //matches IndirectlyDerived /// \endcode /// // FIXME: Refactor this and isDerivedFrom to reuse implementation. AST_MATCHER_P(CXXRecordDecl, hasAnyBase, internal::Matcher<CXXBaseSpecifier>, BaseSpecMatcher) { return internal::matchesAnyBase(Node, BaseSpecMatcher, Finder, Builder); } /// Matches C++ classes that have a direct base matching \p BaseSpecMatcher. /// /// Example: /// matcher hasDirectBase(hasType(cxxRecordDecl(hasName("SpecialBase")))) /// \code /// class Foo; /// class Bar : Foo {}; /// class Baz : Bar {}; /// class SpecialBase; /// class Proxy : SpecialBase {}; // matches Proxy /// class IndirectlyDerived : Proxy {}; // doesn't match /// \endcode AST_MATCHER_P(CXXRecordDecl, hasDirectBase, internal::Matcher<CXXBaseSpecifier>, BaseSpecMatcher) { return Node.hasDefinition() && llvm::any_of(Node.bases(), [&](const CXXBaseSpecifier &Base) { return BaseSpecMatcher.matches(Base, Finder, Builder); }); } /// Similar to \c isDerivedFrom(), but also matches classes that directly /// match \c Base. AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isSameOrDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base, 0) { const auto M = anyOf(Base, isDerivedFrom(Base)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Overloaded method as shortcut for /// \c isSameOrDerivedFrom(hasName(...)). 
AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isSameOrDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isSameOrDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches C++ or Objective-C classes that are directly derived from a class /// matching \c Base. /// /// Note that a class is not considered to be derived from itself. /// /// Example matches Y, C (Base == hasName("X")) /// \code /// class X; /// class Y : public X {}; // directly derived /// class Z : public Y {}; // indirectly derived /// typedef X A; /// typedef A B; /// class C : public B {}; // derived from a typedef of X /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("X")): /// \code /// class Foo; /// typedef Foo X; /// class Bar : public Foo {}; // derived from a type that X is a typedef of /// \endcode AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDirectlyDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base, 0) { // Check if the node is a C++ struct/union/class. if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/true); // The node must be an Objective-C class. const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder, /*Directly=*/true); } /// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)). 
AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDirectlyDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isDirectlyDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches the first method of a class or struct that satisfies \c /// InnerMatcher. /// /// Given: /// \code /// class A { void func(); }; /// class B { void member(); }; /// \endcode /// /// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of /// \c A but not \c B. AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.method_begin(), Node.method_end(), Finder, Builder); } /// Matches the generated class of lambda expressions. /// /// Given: /// \code /// auto x = []{}; /// \endcode /// /// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of /// \c decltype(x) AST_MATCHER(CXXRecordDecl, isLambda) { return Node.isLambda(); } /// Matches AST nodes that have child AST nodes that match the /// provided matcher. /// /// Example matches X, Y /// (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X"))) /// \code /// class X {}; // Matches X, because X::X is a class of name X inside X. /// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. /// /// Usable as: Any Matcher /// Note that has is direct matcher, so it also matches things like implicit /// casts and paren casts. If you are matching with expr then you should /// probably consider using ignoringParenImpCasts like: /// has(ignoringParenImpCasts(expr())). 
extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Z /// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; // Matches X, because X::X is a class of name X inside X. /// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasDescendantMatcher> hasDescendant; /// Matches AST nodes that have child AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Y::X, Z::Y, Z::Y::X /// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X"))) /// \code /// class X {}; /// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X /// // inside Y. /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. /// /// As opposed to 'has', 'forEach' will cause a match for each result that /// matches instead of only on the first one. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher> forEach; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, A, A::X, B, B::C, B::C::X /// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; /// class A { class X {}; }; // Matches A, because A::X is a class of name /// // X inside A. /// class B { class C { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for /// each result that matches instead of only on the first one. 
/// /// Note: Recursively combined ForEachDescendant can cause many matches: /// cxxRecordDecl(forEachDescendant(cxxRecordDecl( /// forEachDescendant(cxxRecordDecl()) /// ))) /// will match 10 times (plus injected class name matches) on: /// \code /// class A { class B { class C { class D { class E {}; }; }; }; }; /// \endcode /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::ForEachDescendantMatcher> forEachDescendant; /// Matches if the node or any descendant matches. /// /// Generates results for each match. /// /// For example, in: /// \code /// class A { class B {}; class C {}; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(hasName("::A"), /// findAll(cxxRecordDecl(isDefinition()).bind("m"))) /// \endcode /// will generate results for \c A, \c B and \c C. /// /// Usable as: Any Matcher template <typename T> internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) { return eachOf(Matcher, forEachDescendant(Matcher)); } /// Matches AST nodes that have a parent that matches the provided /// matcher. /// /// Given /// \code /// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } } /// \endcode /// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }". /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasParentMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasParent; /// Matches AST nodes that have an ancestor that matches the provided /// matcher. /// /// Given /// \code /// void f() { if (true) { int x = 42; } } /// void g() { for (;;) { int x = 43; } } /// \endcode /// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43. 
/// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasAncestorMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasAncestor; /// Matches if the provided matcher does not match. /// /// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X")))) /// \code /// class X {}; /// class Y {}; /// \endcode /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> unless; /// Matches a node if the declaration associated with that node /// matches the given matcher. /// /// The associated declaration is: /// - for type nodes, the declaration of the underlying type /// - for CallExpr, the declaration of the callee /// - for MemberExpr, the declaration of the referenced member /// - for CXXConstructExpr, the declaration of the constructor /// - for CXXNewExpr, the declaration of the operator new /// - for ObjCIvarExpr, the declaration of the ivar /// /// For type nodes, hasDeclaration will generally match the declaration of the /// sugared type. Given /// \code /// class X {}; /// typedef X Y; /// Y y; /// \endcode /// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the /// typedefDecl. A common use case is to match the underlying, desugared type. /// This can be achieved by using the hasUnqualifiedDesugaredType matcher: /// \code /// varDecl(hasType(hasUnqualifiedDesugaredType( /// recordType(hasDeclaration(decl()))))) /// \endcode /// In this matcher, the decl will match the CXXRecordDecl of class X. 
/// /// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>, /// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>, /// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>, /// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>, /// Matcher<TagType>, Matcher<TemplateSpecializationType>, /// Matcher<TemplateTypeParmType>, Matcher<TypedefType>, /// Matcher<UnresolvedUsingType> inline internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)> hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) { return internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)>(InnerMatcher); } /// Matches a \c NamedDecl whose underlying declaration matches the given /// matcher. /// /// Given /// \code /// namespace N { template<class T> void f(T t); } /// template <class T> void g() { using N::f; f(T()); } /// \endcode /// \c unresolvedLookupExpr(hasAnyDeclaration( /// namedDecl(hasUnderlyingDecl(hasName("::N::f"))))) /// matches the use of \c f in \c g() . AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>, InnerMatcher) { const NamedDecl *UnderlyingDecl = Node.getUnderlyingDecl(); return UnderlyingDecl != nullptr && InnerMatcher.matches(*UnderlyingDecl, Finder, Builder); } /// Matches on the implicit object argument of a member call expression, after /// stripping off any parentheses or implicit casts. /// /// Given /// \code /// class Y { public: void m(); }; /// Y g(); /// class X : public Y {}; /// void z(Y y, X x) { y.m(); (g()).m(); x.m(); } /// \endcode /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y"))))) /// matches `y.m()` and `(g()).m()`. /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m()`. /// cxxMemberCallExpr(on(callExpr())) /// matches `(g()).m()`. 
/// /// FIXME: Overload to allow directly matching types? AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>, InnerMatcher) { const Expr *ExprNode = Node.getImplicitObjectArgument() ->IgnoreParenImpCasts(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches on the receiver of an ObjectiveC Message expression. /// /// Example /// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *"))); /// matches the [webView ...] message invocation. /// \code /// NSString *webViewJavaScript = ... /// UIWebView *webView = ... /// [webView stringByEvaluatingJavaScriptFromString:webViewJavascript]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>, InnerMatcher) { const QualType TypeDecl = Node.getReceiverType(); return InnerMatcher.matches(TypeDecl, Finder, Builder); } /// Returns true when the Objective-C method declaration is a class method. /// /// Example /// matcher = objcMethodDecl(isClassMethod()) /// matches /// \code /// @interface I + (void)foo; @end /// \endcode /// but not /// \code /// @interface I - (void)bar; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isClassMethod) { return Node.isClassMethod(); } /// Returns true when the Objective-C method declaration is an instance method. /// /// Example /// matcher = objcMethodDecl(isInstanceMethod()) /// matches /// \code /// @interface I - (void)bar; @end /// \endcode /// but not /// \code /// @interface I + (void)foo; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isInstanceMethod) { return Node.isInstanceMethod(); } /// Returns true when the Objective-C message is sent to a class. 
/// /// Example /// matcher = objcMessageExpr(isClassMessage()) /// matches /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode /// but not /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isClassMessage) { return Node.isClassMessage(); } /// Returns true when the Objective-C message is sent to an instance. /// /// Example /// matcher = objcMessageExpr(isInstanceMessage()) /// matches /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// but not /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isInstanceMessage) { return Node.isInstanceMessage(); } /// Matches if the Objective-C message is sent to an instance, /// and the inner matcher matches on that instance. /// /// For example the method call in /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// is matched by /// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x")))))) AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>, InnerMatcher) { const Expr *ReceiverNode = Node.getInstanceReceiver(); return (ReceiverNode != nullptr && InnerMatcher.matches(*ReceiverNode->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches when BaseName == Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) { Selector Sel = Node.getSelector(); return BaseName.compare(Sel.getAsString()) == 0; } /// Matches when at least one of the supplied string equals to the /// Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:")); /// matches both of the expressions below: /// \code /// [myObj methodA:argA]; /// [myObj methodB:argB]; /// \endcode extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>, StringRef, internal::hasAnySelectorFunc> hasAnySelector; /// Matches ObjC selectors whose name contains /// a substring matched by the given RegExp. /// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_REGEX(ObjCMessageExpr, matchesSelector, RegExp) { std::string SelectorString = Node.getSelector().getAsString(); return RegExp->match(SelectorString); } /// Matches when the selector is the empty selector /// /// Matches only when the selector of the objCMessageExpr is NULL. This may /// represent an error condition in the tree! AST_MATCHER(ObjCMessageExpr, hasNullSelector) { return Node.getSelector().isNull(); } /// Matches when the selector is a Unary Selector /// /// matcher = objCMessageExpr(matchesSelector(hasUnarySelector()); /// matches self.bodyView in the code below, but NOT the outer message /// invocation of "loadHTMLString:baseURL:". 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER(ObjCMessageExpr, hasUnarySelector) { return Node.getSelector().isUnarySelector(); } /// Matches when the selector is a keyword selector /// /// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame /// message expression in /// /// \code /// UIWebView *webView = ...; /// CGRect bodyFrame = webView.frame; /// bodyFrame.size.height = self.bodyContentHeight; /// webView.frame = bodyFrame; /// // ^---- matches here /// \endcode AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) { return Node.getSelector().isKeywordSelector(); } /// Matches when the selector has the specified number of arguments /// /// matcher = objCMessageExpr(numSelectorArgs(0)); /// matches self.bodyView in the code below /// /// matcher = objCMessageExpr(numSelectorArgs(2)); /// matches the invocation of "loadHTMLString:baseURL:" but not that /// of self.bodyView /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) { return Node.getSelector().getNumArgs() == N; } /// Matches if the call expression's callee expression matches. /// /// Given /// \code /// class Y { void x() { this->x(); x(); Y y; y.x(); } }; /// void f() { f(); } /// \endcode /// callExpr(callee(expr())) /// matches this->x(), x(), y.x(), f() /// with callee(...) /// matching this->x, x, y.x, f respectively /// /// Note: Callee cannot take the more general internal::Matcher<Expr> /// because this introduces ambiguous overloads with calls to Callee taking a /// internal::Matcher<Decl>, as the matcher hierarchy is purely /// implemented in terms of implicit casts. AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>, InnerMatcher) { const Expr *ExprNode = Node.getCallee(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches if the call expression's callee's declaration matches the /// given matcher. 
/// /// Example matches y.x() (matcher = callExpr(callee( /// cxxMethodDecl(hasName("x"))))) /// \code /// class Y { public: void x(); }; /// void z() { Y y; y.x(); } /// \endcode AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher, 1) { return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder); } /// Matches if the expression's or declaration's type matches a type /// matcher. /// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and U (matcher = typedefDecl(hasType(asString("int"))) /// and friend class X (matcher = friendDecl(hasType("X")) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// typedef int U; /// class Y { friend class X; }; /// \endcode AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl, ValueDecl), internal::Matcher<QualType>, InnerMatcher, 0) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return InnerMatcher.matches(QT, Finder, Builder); return false; } /// Overloaded to match the declaration of the expression's or value /// declaration's type. /// /// In case of a value declaration (for example a variable declaration), /// this resolves one layer of indirection. For example, in the value /// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of /// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the /// declaration of x. 
/// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and friend class X (matcher = friendDecl(hasType("X")) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// class Y { friend class X; }; /// \endcode /// /// Example matches class Derived /// (matcher = cxxRecordDecl(hasAnyBase(hasType(cxxRecordDecl(hasName("Base")))))) /// \code /// class Base {}; /// class Derived : Base {}; /// \endcode /// /// Usable as: Matcher<Expr>, Matcher<FriendDecl>, Matcher<ValueDecl>, /// Matcher<CXXBaseSpecifier> AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl, CXXBaseSpecifier), internal::Matcher<Decl>, InnerMatcher, 1) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder); return false; } /// Matches if the type location of the declarator decl's type matches /// the inner matcher. /// /// Given /// \code /// int x; /// \endcode /// declaratorDecl(hasTypeLoc(loc(asString("int")))) /// matches int x AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) { if (!Node.getTypeSourceInfo()) // This happens for example for implicit destructors. return false; return Inner.matches(Node.getTypeSourceInfo()->getTypeLoc(), Finder, Builder); } /// Matches if the matched type is represented by the given string. /// /// Given /// \code /// class Y { public: void x(); }; /// void z() { Y* y; y->x(); } /// \endcode /// cxxMemberCallExpr(on(hasType(asString("class Y *")))) /// matches y->x() AST_MATCHER_P(QualType, asString, std::string, Name) { return Name == Node.getAsString(); } /// Matches if the matched type is a pointer type and the pointee type /// matches the specified matcher. 
/// /// Example matches y->x() /// (matcher = cxxMemberCallExpr(on(hasType(pointsTo /// cxxRecordDecl(hasName("Y"))))))) /// \code /// class Y { public: void x(); }; /// void z() { Y *y; y->x(); } /// \endcode AST_MATCHER_P( QualType, pointsTo, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isAnyPointerType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Overloaded to match the pointee type's declaration. AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>, InnerMatcher, 1) { return pointsTo(qualType(hasDeclaration(InnerMatcher))) .matches(Node, Finder, Builder); } /// Matches if the matched type matches the unqualified desugared /// type of the matched node. /// /// For example, in: /// \code /// class A {}; /// using B = A; /// \endcode /// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches /// both B and A. AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>, InnerMatcher) { return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder, Builder); } /// Matches if the matched type is a reference type and the referenced /// type matches the specified matcher. /// /// Example matches X &x and const X &y /// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X")))))) /// \code /// class X { /// void a(X b) { /// X &x = b; /// const X &y = b; /// } /// }; /// \endcode AST_MATCHER_P(QualType, references, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isReferenceType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Matches QualTypes whose canonical type matches InnerMatcher. /// /// Given: /// \code /// typedef int &int_ref; /// int a; /// int_ref b = a; /// \endcode /// /// \c varDecl(hasType(qualType(referenceType()))))) will not match the /// declaration of b but \c /// varDecl(hasType(qualType(hasCanonicalType(referenceType())))))) does. 
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  if (Node.isNull())
    return false;
  // The canonical type has all sugar (typedefs, etc.) stripped.
  return InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}

/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  return references(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   Y g();
///   class X : public Y { void g(); };
///   void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
///   does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  // Unlike `on`, no IgnoreParenImpCasts here: the raw argument is matched.
  const Expr *ExprNode = Node.getImplicitObjectArgument();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode, Finder, Builder));
}

/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   class X : public Y { void g(); };
///   void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("X")))))
///   matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  // Build the combined matcher once: either the object has the type directly,
  // or it is a pointer to a matching type.
  const auto ObjectArgMatcher = onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))));
  return ObjectArgMatcher.matches(Node, Finder, Builder);
}

/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  // Same structure as the QualType overload; hasType(Matcher<Decl>) resolves
  // the type's declaration before matching.
  const auto ObjectArgMatcher = onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))));
  return ObjectArgMatcher.matches(Node, Finder, Builder);
}

/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
///     (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
///   bool x;
///   if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>, InnerMatcher) {
  // Guard against a null referenced declaration before delegating.
  if (const Decl *ReferencedDecl = Node.getDecl())
    return InnerMatcher.matches(*ReferencedDecl, Finder, Builder);
  return false;
}

/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
///   namespace a { void f() {} }
///   using a::f;
///   void g() {
///     f();     // Matches this ..
///     a::f();  // .. but not this.
///   }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
///   matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Only references found through a using-declaration have a
  // UsingShadowDecl as their found declaration.
  const auto *Shadow = dyn_cast<UsingShadowDecl>(Node.getFoundDecl());
  return Shadow != nullptr && InnerMatcher.matches(*Shadow, Finder, Builder);
}

/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
/// /// Given /// \code /// template <typename T> void foo(T); /// template <typename T> void bar(T); /// template <typename T> void baz(T t) { /// foo(t); /// bar(t); /// } /// \endcode /// unresolvedLookupExpr(hasAnyDeclaration( /// functionTemplateDecl(hasName("foo")))) /// matches \c foo in \c foo(t); but not \c bar in \c bar(t); AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(), Node.decls_end(), Finder, Builder); } /// Matches the Decl of a DeclStmt which has a single declaration. /// /// Given /// \code /// int a, b; /// int c; /// \endcode /// declStmt(hasSingleDecl(anything())) /// matches 'int c;' but not 'int a, b;'. AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) { if (Node.isSingleDecl()) { const Decl *FoundDecl = Node.getSingleDecl(); return InnerMatcher.matches(*FoundDecl, Finder, Builder); } return false; } /// Matches a variable declaration that has an initializer expression /// that matches the given matcher. /// /// Example matches x (matcher = varDecl(hasInitializer(callExpr()))) /// \code /// bool y() { return true; } /// bool x = y(); /// \endcode AST_MATCHER_P( VarDecl, hasInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getAnyInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// \brief Matches a static variable with local scope. /// /// Example matches y (matcher = varDecl(isStaticLocal())) /// \code /// void f() { /// int x; /// static int y; /// } /// static int z; /// \endcode AST_MATCHER(VarDecl, isStaticLocal) { return Node.isStaticLocal(); } /// Matches a variable declaration that has function scope and is a /// non-static local variable. 
/// /// Example matches x (matcher = varDecl(hasLocalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasLocalStorage) { return Node.hasLocalStorage(); } /// Matches a variable declaration that does not have local storage. /// /// Example matches y and z (matcher = varDecl(hasGlobalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasGlobalStorage) { return Node.hasGlobalStorage(); } /// Matches a variable declaration that has automatic storage duration. /// /// Example matches x, but not y, z, or a. /// (matcher = varDecl(hasAutomaticStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasAutomaticStorageDuration) { return Node.getStorageDuration() == SD_Automatic; } /// Matches a variable declaration that has static storage duration. /// It includes the variable declared at namespace scope and those declared /// with "static" and "extern" storage class specifiers. /// /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// static int b; /// extern int c; /// varDecl(hasStaticStorageDuration()) /// matches the function declaration y, a, b and c. /// \endcode AST_MATCHER(VarDecl, hasStaticStorageDuration) { return Node.getStorageDuration() == SD_Static; } /// Matches a variable declaration that has thread storage duration. /// /// Example matches z, but not x, z, or a. /// (matcher = varDecl(hasThreadStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasThreadStorageDuration) { return Node.getStorageDuration() == SD_Thread; } /// Matches a variable declaration that is an exception variable from /// a C++ catch block, or an Objective-C \@catch statement. 
/// /// Example matches x (matcher = varDecl(isExceptionVariable()) /// \code /// void f(int y) { /// try { /// } catch (int x) { /// } /// } /// \endcode AST_MATCHER(VarDecl, isExceptionVariable) { return Node.isExceptionVariable(); } /// Checks that a call expression or a constructor call expression has /// a specific number of arguments (including absent default arguments). /// /// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2))) /// \code /// void f(int x, int y); /// f(0, 0); /// \endcode AST_POLYMORPHIC_MATCHER_P(argumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr, ObjCMessageExpr), unsigned, N) { return Node.getNumArgs() == N; } /// Matches the n'th argument of a call expression or a constructor /// call expression. /// /// Example matches y in x(y) /// (matcher = callExpr(hasArgument(0, declRefExpr()))) /// \code /// void x(int) { int y; x(y); } /// \endcode AST_POLYMORPHIC_MATCHER_P2(hasArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr, ObjCMessageExpr), unsigned, N, internal::Matcher<Expr>, InnerMatcher) { return (N < Node.getNumArgs() && InnerMatcher.matches( *Node.getArg(N)->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches the n'th item of an initializer list expression. /// /// Example matches y. /// (matcher = initListExpr(hasInit(0, expr()))) /// \code /// int x{y}. /// \endcode AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { return N < Node.getNumInits() && InnerMatcher.matches(*Node.getInit(N), Finder, Builder); } /// Matches declaration statements that contain a specific number of /// declarations. /// /// Example: Given /// \code /// int a, b; /// int c; /// int d = 2, e; /// \endcode /// declCountIs(2) /// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'. 
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  // std::distance returns ptrdiff_t; cast N to compare like with like.
  return std::distance(Node.decl_begin(), Node.decl_end()) == (ptrdiff_t)N;
}

/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
///   int a, b = 0;
///   int c;
///   int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
///       0, varDecl(hasInitializer(anything()))))
///   matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
/// \code
///   matches 'int a, b = 0' as well as 'int d = 2, e;'
///   but 'int c;' is not matched.
/// \endcode
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
  if (N >= NumDecls)
    return false;
  // Advance the forward iterator to the N'th declaration.
  DeclStmt::const_decl_iterator Iterator = Node.decl_begin();
  std::advance(Iterator, N);
  return InnerMatcher.matches(**Iterator, Finder, Builder);
}

/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
///   try {
///     // ...
///   } catch (int) {
///     // ...
///   } catch (...) {
///     // ...
///   }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
  // catch(...) has no exception declaration.
  return Node.getExceptionDecl() == nullptr;
}

/// Matches a constructor initializer.
/// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl( /// hasAnyConstructorInitializer(anything()) /// ))) /// record matches Foo, hasAnyConstructorInitializer matches foo_(1) AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(), Node.init_end(), Finder, Builder); } /// Matches the field declaration of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// forField(hasName("foo_")))))) /// matches Foo /// with forField matching foo_ AST_MATCHER_P(CXXCtorInitializer, forField, internal::Matcher<FieldDecl>, InnerMatcher) { const FieldDecl *NodeAsDecl = Node.getAnyMember(); return (NodeAsDecl != nullptr && InnerMatcher.matches(*NodeAsDecl, Finder, Builder)); } /// Matches the initializer expression of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// withInitializer(integerLiteral(equals(1))))))) /// matches Foo /// with withInitializer matching (1) AST_MATCHER_P(CXXCtorInitializer, withInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr* NodeAsExpr = Node.getInit(); return (NodeAsExpr != nullptr && InnerMatcher.matches(*NodeAsExpr, Finder, Builder)); } /// Matches a constructor initializer if it is explicitly written in /// code (as opposed to implicitly added by the compiler). 
/// /// Given /// \code /// struct Foo { /// Foo() { } /// Foo(int) : foo_("A") { } /// string foo_; /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten())) /// will match Foo(int), but not Foo() AST_MATCHER(CXXCtorInitializer, isWritten) { return Node.isWritten(); } /// Matches a constructor initializer if it is initializing a base, as /// opposed to a member. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer())) /// will match E(), but not match D(int). AST_MATCHER(CXXCtorInitializer, isBaseInitializer) { return Node.isBaseInitializer(); } /// Matches a constructor initializer if it is initializing a member, as /// opposed to a base. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer())) /// will match D(int), but not match E(). AST_MATCHER(CXXCtorInitializer, isMemberInitializer) { return Node.isMemberInitializer(); } /// Matches any argument of a call expression or a constructor call /// expression, or an ObjC-message-send expression. /// /// Given /// \code /// void x(int, int, int) { int y; x(1, y, 42); } /// \endcode /// callExpr(hasAnyArgument(declRefExpr())) /// matches x(1, y, 42) /// with hasAnyArgument(...) 
/// matching y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// void foo(I *i) { [i f:12]; } /// \endcode /// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12)))) /// matches [i f:12] AST_POLYMORPHIC_MATCHER_P(hasAnyArgument, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), internal::Matcher<Expr>, InnerMatcher) { for (const Expr *Arg : Node.arguments()) { BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Arg, Finder, &Result)) { *Builder = std::move(Result); return true; } } return false; } /// Matches any capture of a lambda expression. /// /// Given /// \code /// void foo() { /// int x; /// auto f = [x](){}; /// } /// \endcode /// lambdaExpr(hasAnyCapture(anything())) /// matches [x](){}; AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>, InnerMatcher, 0) { for (const LambdaCapture &Capture : Node.captures()) { if (Capture.capturesVariable()) { BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Capture.getCapturedVar(), Finder, &Result)) { *Builder = std::move(Result); return true; } } } return false; } /// Matches any capture of 'this' in a lambda expression. /// /// Given /// \code /// struct foo { /// void bar() { /// auto f = [this](){}; /// } /// } /// \endcode /// lambdaExpr(hasAnyCapture(cxxThisExpr())) /// matches [this](){}; AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<CXXThisExpr>, InnerMatcher, 1) { return llvm::any_of(Node.captures(), [](const LambdaCapture &LC) { return LC.capturesThis(); }); } /// Matches a constructor call expression which uses list initialization. AST_MATCHER(CXXConstructExpr, isListInitialization) { return Node.isListInitialization(); } /// Matches a constructor call expression which requires /// zero initialization. 
/// /// Given /// \code /// void foo() { /// struct point { double x; double y; }; /// point pt[2] = { { 1.0, 2.0 } }; /// } /// \endcode /// initListExpr(has(cxxConstructExpr(requiresZeroInitialization())) /// will match the implicit array filler for pt[1]. AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) { return Node.requiresZeroInitialization(); } /// Matches the n'th parameter of a function or an ObjC method /// declaration or a block. /// /// Given /// \code /// class X { void f(int x) {} }; /// \endcode /// cxxMethodDecl(hasParameter(0, hasType(varDecl()))) /// matches f(int x) {} /// with hasParameter(...) /// matching int x /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasParameter(0, hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. AST_POLYMORPHIC_MATCHER_P2(hasParameter, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, ObjCMethodDecl, BlockDecl), unsigned, N, internal::Matcher<ParmVarDecl>, InnerMatcher) { return (N < Node.parameters().size() && InnerMatcher.matches(*Node.parameters()[N], Finder, Builder)); } /// Matches all arguments and their respective ParmVarDecl. /// /// Given /// \code /// void f(int i); /// int y; /// f(y); /// \endcode /// callExpr( /// forEachArgumentWithParam( /// declRefExpr(to(varDecl(hasName("y")))), /// parmVarDecl(hasType(isInteger())) /// )) /// matches f(y); /// with declRefExpr(...) /// matching int y /// and parmVarDecl(...) /// matching int i AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr), internal::Matcher<Expr>, ArgMatcher, internal::Matcher<ParmVarDecl>, ParamMatcher) { BoundNodesTreeBuilder Result; // The first argument of an overloaded member operator is the implicit object // argument of the method which should not be matched against a parameter, so // we skip over it here. 
BoundNodesTreeBuilder Matches; unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl())) .matches(Node, Finder, &Matches) ? 1 : 0; int ParamIndex = 0; bool Matched = false; for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) { BoundNodesTreeBuilder ArgMatches(*Builder); if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()), Finder, &ArgMatches)) { BoundNodesTreeBuilder ParamMatches(ArgMatches); if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl( hasParameter(ParamIndex, ParamMatcher)))), callExpr(callee(functionDecl( hasParameter(ParamIndex, ParamMatcher)))))) .matches(Node, Finder, &ParamMatches)) { Result.addMatch(ParamMatches); Matched = true; } } ++ParamIndex; } *Builder = std::move(Result); return Matched; } /// Matches all arguments and their respective types for a \c CallExpr or /// \c CXXConstructExpr. It is very similar to \c forEachArgumentWithParam but /// it works on calls through function pointers as well. /// /// The difference is, that function pointers do not provide access to a /// \c ParmVarDecl, but only the \c QualType for each argument. /// /// Given /// \code /// void f(int i); /// int y; /// f(y); /// void (*f_ptr)(int) = f; /// f_ptr(y); /// \endcode /// callExpr( /// forEachArgumentWithParamType( /// declRefExpr(to(varDecl(hasName("y")))), /// qualType(isInteger()).bind("type) /// )) /// matches f(y) and f_ptr(y) /// with declRefExpr(...) /// matching int y /// and qualType(...) /// matching int AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParamType, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr), internal::Matcher<Expr>, ArgMatcher, internal::Matcher<QualType>, ParamMatcher) { BoundNodesTreeBuilder Result; // The first argument of an overloaded member operator is the implicit object // argument of the method which should not be matched against a parameter, so // we skip over it here. 
BoundNodesTreeBuilder Matches; unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl())) .matches(Node, Finder, &Matches) ? 1 : 0; const FunctionProtoType *FProto = nullptr; if (const auto *Call = dyn_cast<CallExpr>(&Node)) { if (const auto *Value = dyn_cast_or_null<ValueDecl>(Call->getCalleeDecl())) { QualType QT = Value->getType().getCanonicalType(); // This does not necessarily lead to a `FunctionProtoType`, // e.g. K&R functions do not have a function prototype. if (QT->isFunctionPointerType()) FProto = QT->getPointeeType()->getAs<FunctionProtoType>(); if (QT->isMemberFunctionPointerType()) { const auto *MP = QT->getAs<MemberPointerType>(); assert(MP && "Must be member-pointer if its a memberfunctionpointer"); FProto = MP->getPointeeType()->getAs<FunctionProtoType>(); assert(FProto && "The call must have happened through a member function " "pointer"); } } } int ParamIndex = 0; bool Matched = false; for (; ArgIndex < Node.getNumArgs(); ++ArgIndex, ++ParamIndex) { BoundNodesTreeBuilder ArgMatches(*Builder); if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()), Finder, &ArgMatches)) { BoundNodesTreeBuilder ParamMatches(ArgMatches); // This test is cheaper compared to the big matcher in the next if. // Therefore, please keep this order. if (FProto) { QualType ParamType = FProto->getParamType(ParamIndex); if (ParamMatcher.matches(ParamType, Finder, &ParamMatches)) { Result.addMatch(ParamMatches); Matched = true; continue; } } if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl( hasParameter(ParamIndex, hasType(ParamMatcher))))), callExpr(callee(functionDecl( hasParameter(ParamIndex, hasType(ParamMatcher))))))) .matches(Node, Finder, &ParamMatches)) { Result.addMatch(ParamMatches); Matched = true; continue; } } } *Builder = std::move(Result); return Matched; } /// Matches the ParmVarDecl nodes that are at the N'th position in the parameter /// list. The parameter list could be that of either a block, function, or /// objc-method. 
/// /// /// Given /// /// \code /// void f(int a, int b, int c) { /// } /// \endcode /// /// ``parmVarDecl(isAtPosition(0))`` matches ``int a``. /// /// ``parmVarDecl(isAtPosition(1))`` matches ``int b``. AST_MATCHER_P(ParmVarDecl, isAtPosition, unsigned, N) { const clang::DeclContext *Context = Node.getParentFunctionOrMethod(); if (const auto *Decl = dyn_cast_or_null<FunctionDecl>(Context)) return N < Decl->param_size() && Decl->getParamDecl(N) == &Node; if (const auto *Decl = dyn_cast_or_null<BlockDecl>(Context)) return N < Decl->param_size() && Decl->getParamDecl(N) == &Node; if (const auto *Decl = dyn_cast_or_null<ObjCMethodDecl>(Context)) return N < Decl->param_size() && Decl->getParamDecl(N) == &Node; return false; } /// Matches any parameter of a function or an ObjC method declaration or a /// block. /// /// Does not match the 'this' parameter of a method. /// /// Given /// \code /// class X { void f(int x, int y, int z) {} }; /// \endcode /// cxxMethodDecl(hasAnyParameter(hasName("y"))) /// matches f(int x, int y, int z) {} /// with hasAnyParameter(...) /// matching int y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. /// /// For blocks, given /// \code /// b = ^(int y) { printf("%d", y) }; /// \endcode /// /// the matcher blockDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of the block b with hasParameter /// matching y. AST_POLYMORPHIC_MATCHER_P(hasAnyParameter, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, ObjCMethodDecl, BlockDecl), internal::Matcher<ParmVarDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(), Node.param_end(), Finder, Builder); } /// Matches \c FunctionDecls and \c FunctionProtoTypes that have a /// specific parameter count. 
/// /// Given /// \code /// void f(int i) {} /// void g(int i, int j) {} /// void h(int i, int j); /// void j(int i); /// void k(int x, int y, int z, ...); /// \endcode /// functionDecl(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(3)) /// matches \c k AST_POLYMORPHIC_MATCHER_P(parameterCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType), unsigned, N) { return Node.getNumParams() == N; } /// Matches \c FunctionDecls that have a noreturn attribute. /// /// Given /// \code /// void nope(); /// [[noreturn]] void a(); /// __attribute__((noreturn)) void b(); /// struct c { [[noreturn]] c(); }; /// \endcode /// functionDecl(isNoReturn()) /// matches all of those except /// \code /// void nope(); /// \endcode AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); } /// Matches the return type of a function declaration. /// /// Given: /// \code /// class X { int f() { return 1; } }; /// \endcode /// cxxMethodDecl(returns(asString("int"))) /// matches int f() { return 1; } AST_MATCHER_P(FunctionDecl, returns, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getReturnType(), Finder, Builder); } /// Matches extern "C" function or variable declarations. /// /// Given: /// \code /// extern "C" void f() {} /// extern "C" { void g() {} } /// void h() {} /// extern "C" int x = 1; /// extern "C" int y = 2; /// int z = 3; /// \endcode /// functionDecl(isExternC()) /// matches the declaration of f and g, but not the declaration of h. /// varDecl(isExternC()) /// matches the declaration of x and y, but not the declaration of z. AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.isExternC(); } /// Matches variable/function declarations that have "static" storage /// class specifier ("static" keyword) written in the source. 
/// /// Given: /// \code /// static void f() {} /// static int i = 0; /// extern int j; /// int k; /// \endcode /// functionDecl(isStaticStorageClass()) /// matches the function declaration f. /// varDecl(isStaticStorageClass()) /// matches the variable declaration i. AST_POLYMORPHIC_MATCHER(isStaticStorageClass, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.getStorageClass() == SC_Static; } /// Matches deleted function declarations. /// /// Given: /// \code /// void Func(); /// void DeletedFunc() = delete; /// \endcode /// functionDecl(isDeleted()) /// matches the declaration of DeletedFunc, but not Func. AST_MATCHER(FunctionDecl, isDeleted) { return Node.isDeleted(); } /// Matches defaulted function declarations. /// /// Given: /// \code /// class A { ~A(); }; /// class B { ~B() = default; }; /// \endcode /// functionDecl(isDefaulted()) /// matches the declaration of ~B, but not ~A. AST_MATCHER(FunctionDecl, isDefaulted) { return Node.isDefaulted(); } /// Matches functions that have a dynamic exception specification. /// /// Given: /// \code /// void f(); /// void g() noexcept; /// void h() noexcept(true); /// void i() noexcept(false); /// void j() throw(); /// void k() throw(int); /// void l() throw(...); /// \endcode /// functionDecl(hasDynamicExceptionSpec()) and /// functionProtoType(hasDynamicExceptionSpec()) /// match the declarations of j, k, and l, but not f, g, h, or i. AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType)) { if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node)) return FnTy->hasDynamicExceptionSpec(); return false; } /// Matches functions that have a non-throwing exception specification. 
/// /// Given: /// \code /// void f(); /// void g() noexcept; /// void h() throw(); /// void i() throw(int); /// void j() noexcept(false); /// \endcode /// functionDecl(isNoThrow()) and functionProtoType(isNoThrow()) /// match the declarations of g, and h, but not f, i or j. AST_POLYMORPHIC_MATCHER(isNoThrow, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType)) { const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node); // If the function does not have a prototype, then it is assumed to be a // throwing function (as it would if the function did not have any exception // specification). if (!FnTy) return false; // Assume the best for any unresolved exception specification. if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType())) return true; return FnTy->isNothrow(); } /// Matches constexpr variable and function declarations, /// and if constexpr. /// /// Given: /// \code /// constexpr int foo = 42; /// constexpr int bar(); /// void baz() { if constexpr(1 > 0) {} } /// \endcode /// varDecl(isConstexpr()) /// matches the declaration of foo. /// functionDecl(isConstexpr()) /// matches the declaration of bar. /// ifStmt(isConstexpr()) /// matches the if statement in baz. AST_POLYMORPHIC_MATCHER(isConstexpr, AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl, FunctionDecl, IfStmt)) { return Node.isConstexpr(); } /// Matches selection statements with initializer. /// /// Given: /// \code /// void foo() { /// if (int i = foobar(); i > 0) {} /// switch (int i = foobar(); i) {} /// for (auto& a = get_range(); auto& x : a) {} /// } /// void bar() { /// if (foobar() > 0) {} /// switch (foobar()) {} /// for (auto& x : get_range()) {} /// } /// \endcode /// ifStmt(hasInitStatement(anything())) /// matches the if statement in foo but not in bar. /// switchStmt(hasInitStatement(anything())) /// matches the switch statement in foo but not in bar. /// cxxForRangeStmt(hasInitStatement(anything())) /// matches the range for statement in foo but not in bar. 
AST_POLYMORPHIC_MATCHER_P(hasInitStatement, AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt, CXXForRangeStmt), internal::Matcher<Stmt>, InnerMatcher) { const Stmt *Init = Node.getInit(); return Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder); } /// Matches the condition expression of an if statement, for loop, /// switch statement or conditional operator. /// /// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true)))) /// \code /// if (true) {} /// \endcode AST_POLYMORPHIC_MATCHER_P( hasCondition, AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt, SwitchStmt, AbstractConditionalOperator), internal::Matcher<Expr>, InnerMatcher) { const Expr *const Condition = Node.getCond(); return (Condition != nullptr && InnerMatcher.matches(*Condition, Finder, Builder)); } /// Matches the then-statement of an if statement. /// /// Examples matches the if statement /// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true))))) /// \code /// if (false) true; else false; /// \endcode AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Then = Node.getThen(); return (Then != nullptr && InnerMatcher.matches(*Then, Finder, Builder)); } /// Matches the else-statement of an if statement. /// /// Examples matches the if statement /// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true))))) /// \code /// if (false) false; else true; /// \endcode AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Else = Node.getElse(); return (Else != nullptr && InnerMatcher.matches(*Else, Finder, Builder)); } /// Matches if a node equals a previously bound node. /// /// Matches a node if it equals the node previously bound to \p ID. 
/// /// Given /// \code /// class X { int a; int b; }; /// \endcode /// cxxRecordDecl( /// has(fieldDecl(hasName("a"), hasType(type().bind("t")))), /// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t")))))) /// matches the class \c X, as \c a and \c b have the same type. /// /// Note that when multiple matches are involved via \c forEach* matchers, /// \c equalsBoundNodes acts as a filter. /// For example: /// compoundStmt( /// forEachDescendant(varDecl().bind("d")), /// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d")))))) /// will trigger a match for each combination of variable declaration /// and reference to that variable declaration within a compound statement. AST_POLYMORPHIC_MATCHER_P(equalsBoundNode, AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type, QualType), std::string, ID) { // FIXME: Figure out whether it makes sense to allow this // on any other node types. // For *Loc it probably does not make sense, as those seem // unique. For NestedNameSepcifier it might make sense, as // those also have pointer identity, but I'm not sure whether // they're ever reused. internal::NotEqualsBoundNodePredicate Predicate; Predicate.ID = ID; Predicate.Node = DynTypedNode::create(Node); return Builder->removeBindings(Predicate); } /// Matches the condition variable statement in an if statement. /// /// Given /// \code /// if (A* a = GetAPointer()) {} /// \endcode /// hasConditionVariableStatement(...) /// matches 'A* a = GetAPointer()'. AST_MATCHER_P(IfStmt, hasConditionVariableStatement, internal::Matcher<DeclStmt>, InnerMatcher) { const DeclStmt* const DeclarationStatement = Node.getConditionVariableDeclStmt(); return DeclarationStatement != nullptr && InnerMatcher.matches(*DeclarationStatement, Finder, Builder); } /// Matches the index expression of an array subscript expression. 
/// /// Given /// \code /// int i[5]; /// void f() { i[1] = 42; } /// \endcode /// arraySubscriptExpression(hasIndex(integerLiteral())) /// matches \c i[1] with the \c integerLiteral() matching \c 1 AST_MATCHER_P(ArraySubscriptExpr, hasIndex, internal::Matcher<Expr>, InnerMatcher) { if (const Expr* Expression = Node.getIdx()) return InnerMatcher.matches(*Expression, Finder, Builder); return false; } /// Matches the base expression of an array subscript expression. /// /// Given /// \code /// int i[5]; /// void f() { i[1] = 42; } /// \endcode /// arraySubscriptExpression(hasBase(implicitCastExpr( /// hasSourceExpression(declRefExpr())))) /// matches \c i[1] with the \c declRefExpr() matching \c i AST_MATCHER_P(ArraySubscriptExpr, hasBase, internal::Matcher<Expr>, InnerMatcher) { if (const Expr* Expression = Node.getBase()) return InnerMatcher.matches(*Expression, Finder, Builder); return false; } /// Matches a 'for', 'while', 'do while' statement or a function /// definition that has a given body. Note that in case of functions /// this matcher only matches the definition itself and not the other /// declarations of the same function. /// /// Given /// \code /// for (;;) {} /// \endcode /// hasBody(compoundStmt()) /// matches 'for (;;) {}' /// with compoundStmt() /// matching '{}' /// /// Given /// \code /// void f(); /// void f() {} /// \endcode /// hasBody(functionDecl()) /// matches 'void f() {}' /// with compoundStmt() /// matching '{}' /// but does not match 'void f();' AST_POLYMORPHIC_MATCHER_P(hasBody, AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt, WhileStmt, CXXForRangeStmt, FunctionDecl), internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node); return (Statement != nullptr && InnerMatcher.matches(*Statement, Finder, Builder)); } /// Matches a function declaration that has a given body present in the AST. 
/// Note that this matcher matches all the declarations of a function whose /// body is present in the AST. /// /// Given /// \code /// void f(); /// void f() {} /// void g(); /// \endcode /// hasAnyBody(functionDecl()) /// matches both 'void f();' /// and 'void f() {}' /// with compoundStmt() /// matching '{}' /// but does not match 'void g();' AST_MATCHER_P(FunctionDecl, hasAnyBody, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Statement = Node.getBody(); return (Statement != nullptr && InnerMatcher.matches(*Statement, Finder, Builder)); } /// Matches compound statements where at least one substatement matches /// a given matcher. Also matches StmtExprs that have CompoundStmt as children. /// /// Given /// \code /// { {}; 1+2; } /// \endcode /// hasAnySubstatement(compoundStmt()) /// matches '{ {}; 1+2; }' /// with compoundStmt() /// matching '{}' AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement, AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt, StmtExpr), internal::Matcher<Stmt>, InnerMatcher) { const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node); return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(), CS->body_end(), Finder, Builder); } /// Checks that a compound statement contains a specific number of /// child statements. /// /// Example: Given /// \code /// { for (;;) {} } /// \endcode /// compoundStmt(statementCountIs(0))) /// matches '{}' /// but does not match the outer compound statement. AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) { return Node.size() == N; } /// Matches literals that are equal to the given value of type ValueT. 
/// /// Given /// \code /// f('\0', false, 3.14, 42); /// \endcode /// characterLiteral(equals(0)) /// matches '\0' /// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0)) /// match false /// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2)) /// match 3.14 /// integerLiteral(equals(42)) /// matches 42 /// /// Note that you cannot directly match a negative numeric literal because the /// minus sign is not part of the literal: It is a unary operator whose operand /// is the positive numeric literal. Instead, you must use a unaryOperator() /// matcher to match the minus sign: /// /// unaryOperator(hasOperatorName("-"), /// hasUnaryOperand(integerLiteral(equals(13)))) /// /// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>, /// Matcher<FloatingLiteral>, Matcher<IntegerLiteral> template <typename ValueT> internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT> equals(const ValueT &Value) { return internal::PolymorphicMatcherWithParam1< internal::ValueEqualsMatcher, ValueT>(Value); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, IntegerLiteral), bool, Value, 0) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, IntegerLiteral), unsigned, Value, 1) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, FloatingLiteral, IntegerLiteral), double, Value, 2) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } /// Matches the operator Name of operator expressions (binary or /// unary). 
/// /// Example matches a || b (matcher = binaryOperator(hasOperatorName("||"))) /// \code /// !(a || b) /// \endcode AST_POLYMORPHIC_MATCHER_P(hasOperatorName, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, UnaryOperator), std::string, Name) { return Name == Node.getOpcodeStr(Node.getOpcode()); } /// Matches operator expressions (binary or unary) that have any of the /// specified names. /// /// hasAnyOperatorName("+", "-") /// Is equivalent to /// anyOf(hasOperatorName("+"), hasOperatorName("-")) extern const internal::VariadicFunction< internal::PolymorphicMatcherWithParam1< internal::HasAnyOperatorNameMatcher, std::vector<std::string>, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, UnaryOperator)>, StringRef, internal::hasAnyOperatorNameFunc> hasAnyOperatorName; /// Matches all kinds of assignment operators. /// /// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator())) /// \code /// if (a == b) /// a += b; /// \endcode /// /// Example 2: matches s1 = s2 /// (matcher = cxxOperatorCallExpr(isAssignmentOperator())) /// \code /// struct S { S& operator=(const S&); }; /// void x() { S s1, s2; s1 = s2; } /// \endcode AST_POLYMORPHIC_MATCHER(isAssignmentOperator, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr)) { return Node.isAssignmentOp(); } /// Matches comparison operators. /// /// Example 1: matches a == b (matcher = binaryOperator(isComparisonOperator())) /// \code /// if (a == b) /// a += b; /// \endcode /// /// Example 2: matches s1 < s2 /// (matcher = cxxOperatorCallExpr(isComparisonOperator())) /// \code /// struct S { bool operator<(const S& other); }; /// void x(S s1, S s2) { bool b1 = s1 < s2; } /// \endcode AST_POLYMORPHIC_MATCHER(isComparisonOperator, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr)) { return Node.isComparisonOp(); } /// Matches the left hand side of binary operator expressions. 
/// /// Example matches a (matcher = binaryOperator(hasLHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasLHS, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *LeftHandSide = Node.getLHS(); return (LeftHandSide != nullptr && InnerMatcher.matches(*LeftHandSide, Finder, Builder)); } /// Matches the right hand side of binary operator expressions. /// /// Example matches b (matcher = binaryOperator(hasRHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasRHS, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *RightHandSide = Node.getRHS(); return (RightHandSide != nullptr && InnerMatcher.matches(*RightHandSide, Finder, Builder)); } /// Matches if either the left hand side or the right hand side of a /// binary operator matches. inline internal::Matcher<BinaryOperator> hasEitherOperand( const internal::Matcher<Expr> &InnerMatcher) { return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher)); } /// Matches if both matchers match with opposite sides of the binary operator. /// /// Example matcher = binaryOperator(hasOperands(integerLiteral(equals(1), /// integerLiteral(equals(2))) /// \code /// 1 + 2 // Match /// 2 + 1 // Match /// 1 + 1 // No match /// 2 + 2 // No match /// \endcode inline internal::Matcher<BinaryOperator> hasOperands(const internal::Matcher<Expr> &Matcher1, const internal::Matcher<Expr> &Matcher2) { return anyOf(allOf(hasLHS(Matcher1), hasRHS(Matcher2)), allOf(hasLHS(Matcher2), hasRHS(Matcher1))); } /// Matches if the operand of a unary operator matches. 
/// /// Example matches true (matcher = hasUnaryOperand( /// cxxBoolLiteral(equals(true)))) /// \code /// !true /// \endcode AST_MATCHER_P(UnaryOperator, hasUnaryOperand, internal::Matcher<Expr>, InnerMatcher) { const Expr * const Operand = Node.getSubExpr(); return (Operand != nullptr && InnerMatcher.matches(*Operand, Finder, Builder)); } /// Matches if the cast's source expression /// or opaque value's source expression matches the given matcher. /// /// Example 1: matches "a string" /// (matcher = castExpr(hasSourceExpression(cxxConstructExpr()))) /// \code /// class URL { URL(string); }; /// URL url = "a string"; /// \endcode /// /// Example 2: matches 'b' (matcher = /// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr()))) /// \code /// int a = b ?: 1; /// \endcode AST_POLYMORPHIC_MATCHER_P(hasSourceExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr, OpaqueValueExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *const SubExpression = internal::GetSourceExpressionMatcher<NodeType>::get(Node); return (SubExpression != nullptr && InnerMatcher.matches(*SubExpression, Finder, Builder)); } /// Matches casts that has a given cast kind. /// /// Example: matches the implicit cast around \c 0 /// (matcher = castExpr(hasCastKind(CK_NullToPointer))) /// \code /// int *p = 0; /// \endcode /// /// If the matcher is use from clang-query, CastKind parameter /// should be passed as a quoted string. e.g., hasCastKind("CK_NullToPointer"). AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) { return Node.getCastKind() == Kind; } /// Matches casts whose destination type matches a given matcher. /// /// (Note: Clang's AST refers to other conversions as "casts" too, and calls /// actual casts "explicit" casts.) 
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType, internal::Matcher<QualType>, InnerMatcher) { const QualType NodeType = Node.getTypeAsWritten(); return InnerMatcher.matches(NodeType, Finder, Builder); } /// Matches implicit casts whose destination type matches a given /// matcher. /// /// FIXME: Unit test this matcher AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getType(), Finder, Builder); } /// Matches TagDecl object that are spelled with "struct." /// /// Example matches S, but not C, U or E. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isStruct) { return Node.isStruct(); } /// Matches TagDecl object that are spelled with "union." /// /// Example matches U, but not C, S or E. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isUnion) { return Node.isUnion(); } /// Matches TagDecl object that are spelled with "class." /// /// Example matches C, but not S, U or E. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isClass) { return Node.isClass(); } /// Matches TagDecl object that are spelled with "enum." /// /// Example matches E, but not C, S or U. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isEnum) { return Node.isEnum(); } /// Matches the true branch expression of a conditional operator. /// /// Example 1 (conditional ternary operator): matches a /// \code /// condition ? 
a : b /// \endcode /// /// Example 2 (conditional binary operator): matches opaqueValueExpr(condition) /// \code /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getTrueExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches the false branch expression of a conditional operator /// (binary or ternary). /// /// Example matches b /// \code /// condition ? a : b /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getFalseExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches if a declaration has a body attached. /// /// Example matches A, va, fa /// \code /// class A {}; /// class B; // Doesn't match, as it has no body. /// int va; /// extern int vb; // Doesn't match, as it doesn't define the variable. /// void fa() {} /// void fb(); // Doesn't match, as it has no body. /// @interface X /// - (void)ma; // Doesn't match, interface is declaration. /// @end /// @implementation X /// - (void)ma {} /// @end /// \endcode /// /// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>, /// Matcher<ObjCMethodDecl> AST_POLYMORPHIC_MATCHER(isDefinition, AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl, ObjCMethodDecl, FunctionDecl)) { return Node.isThisDeclarationADefinition(); } /// Matches if a function declaration is variadic. /// /// Example matches f, but not g or h. The function i will not match, even when /// compiled in C mode. /// \code /// void f(...); /// void g(int); /// template <typename... Ts> void h(Ts...); /// void i(); /// \endcode AST_MATCHER(FunctionDecl, isVariadic) { return Node.isVariadic(); } /// Matches the class declaration that the given method declaration /// belongs to. 
/// /// FIXME: Generalize this for other kinds of declarations. /// FIXME: What other kind of declarations would we need to generalize /// this to? /// /// Example matches A() in the last line /// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl( /// ofClass(hasName("A")))))) /// \code /// class A { /// public: /// A(); /// }; /// A a = A(); /// \endcode AST_MATCHER_P(CXXMethodDecl, ofClass, internal::Matcher<CXXRecordDecl>, InnerMatcher) { const CXXRecordDecl *Parent = Node.getParent(); return (Parent != nullptr && InnerMatcher.matches(*Parent, Finder, Builder)); } /// Matches each method overridden by the given method. This matcher may /// produce multiple matches. /// /// Given /// \code /// class A { virtual void f(); }; /// class B : public A { void f(); }; /// class C : public B { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note /// that B::f is not overridden by C::f). /// /// The check can produce multiple matches in case of multiple inheritance, e.g. /// \code /// class A1 { virtual void f(); }; /// class A2 { virtual void f(); }; /// class C : public A1, public A2 { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and /// once with "b" binding "A2::f" and "d" binding "C::f". 
AST_MATCHER_P(CXXMethodDecl, forEachOverridden, internal::Matcher<CXXMethodDecl>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *Overridden : Node.overridden_methods()) { BoundNodesTreeBuilder OverriddenBuilder(*Builder); const bool OverriddenMatched = InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder); if (OverriddenMatched) { Matched = true; Result.addMatch(OverriddenBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches declarations of virtual methods and C++ base specifiers that specify /// virtual inheritance. /// /// Example: /// \code /// class A { /// public: /// virtual void x(); // matches x /// }; /// \endcode /// /// Example: /// \code /// class Base {}; /// class DirectlyDerived : virtual Base {}; // matches Base /// class IndirectlyDerived : DirectlyDerived, Base {}; // matches Base /// \endcode /// /// Usable as: Matcher<CXXMethodDecl>, Matcher<CXXBaseSpecifier> AST_POLYMORPHIC_MATCHER(isVirtual, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXMethodDecl, CXXBaseSpecifier)) { return Node.isVirtual(); } /// Matches if the given method declaration has an explicit "virtual". /// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// void x(); /// }; /// \endcode /// matches A::x but not B::x AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) { return Node.isVirtualAsWritten(); } /// Matches if the given method or class declaration is final. /// /// Given: /// \code /// class A final {}; /// /// struct B { /// virtual void f(); /// }; /// /// struct C : B { /// void f() final; /// }; /// \endcode /// matches A and C::f, but not B, C, or B::f AST_POLYMORPHIC_MATCHER(isFinal, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, CXXMethodDecl)) { return Node.template hasAttr<FinalAttr>(); } /// Matches if the given method declaration is pure. 
/// /// Given /// \code /// class A { /// public: /// virtual void x() = 0; /// }; /// \endcode /// matches A::x AST_MATCHER(CXXMethodDecl, isPure) { return Node.isPure(); } /// Matches if the given method declaration is const. /// /// Given /// \code /// struct A { /// void foo() const; /// void bar(); /// }; /// \endcode /// /// cxxMethodDecl(isConst()) matches A::foo() but not A::bar() AST_MATCHER(CXXMethodDecl, isConst) { return Node.isConst(); } /// Matches if the given method declaration declares a copy assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not /// the second one. AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) { return Node.isCopyAssignmentOperator(); } /// Matches if the given method declaration declares a move assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not /// the first one. AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) { return Node.isMoveAssignmentOperator(); } /// Matches if the given method declaration overrides another method. /// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// virtual void x(); /// }; /// \endcode /// matches B::x AST_MATCHER(CXXMethodDecl, isOverride) { return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>(); } /// Matches method declarations that are user-provided. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &) = default; // #2 /// S(S &&) = delete; // #3 /// }; /// \endcode /// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3. 
AST_MATCHER(CXXMethodDecl, isUserProvided) { return Node.isUserProvided(); } /// Matches member expressions that are called with '->' as opposed /// to '.'. /// /// Member calls on the implicit this pointer match as called with '->'. /// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// template <class T> void f() { this->f<T>(); f<T>(); } /// int a; /// static int b; /// }; /// template <class T> /// class Z { /// void x() { this->m; } /// }; /// \endcode /// memberExpr(isArrow()) /// matches this->x, x, y.x, a, this->b /// cxxDependentScopeMemberExpr(isArrow()) /// matches this->m /// unresolvedMemberExpr(isArrow()) /// matches this->f<T>, f<T> AST_POLYMORPHIC_MATCHER( isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr, CXXDependentScopeMemberExpr)) { return Node.isArrow(); } /// Matches QualType nodes that are of integer type. /// /// Given /// \code /// void a(int); /// void b(long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isInteger()))) /// matches "a(int)", "b(long)", but not "c(double)". AST_MATCHER(QualType, isInteger) { return Node->isIntegerType(); } /// Matches QualType nodes that are of unsigned integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isUnsignedInteger()))) /// matches "b(unsigned long)", but not "a(int)" and "c(double)". AST_MATCHER(QualType, isUnsignedInteger) { return Node->isUnsignedIntegerType(); } /// Matches QualType nodes that are of signed integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isSignedInteger()))) /// matches "a(int)", but not "b(unsigned long)" and "c(double)". AST_MATCHER(QualType, isSignedInteger) { return Node->isSignedIntegerType(); } /// Matches QualType nodes that are of character type. 
/// /// Given /// \code /// void a(char); /// void b(wchar_t); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isAnyCharacter()))) /// matches "a(char)", "b(wchar_t)", but not "c(double)". AST_MATCHER(QualType, isAnyCharacter) { return Node->isAnyCharacterType(); } /// Matches QualType nodes that are of any pointer type; this includes /// the Objective-C object pointer type, which is different despite being /// syntactically similar. /// /// Given /// \code /// int *i = nullptr; /// /// @interface Foo /// @end /// Foo *f; /// /// int j; /// \endcode /// varDecl(hasType(isAnyPointer())) /// matches "int *i" and "Foo *f", but not "int j". AST_MATCHER(QualType, isAnyPointer) { return Node->isAnyPointerType(); } /// Matches QualType nodes that are const-qualified, i.e., that /// include "top-level" const. /// /// Given /// \code /// void a(int); /// void b(int const); /// void c(const int); /// void d(const int*); /// void e(int const) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isConstQualified()))) /// matches "void b(int const)", "void c(const int)" and /// "void e(int const) {}". It does not match d as there /// is no top-level const on the parameter type "const int *". AST_MATCHER(QualType, isConstQualified) { return Node.isConstQualified(); } /// Matches QualType nodes that are volatile-qualified, i.e., that /// include "top-level" volatile. /// /// Given /// \code /// void a(int); /// void b(int volatile); /// void c(volatile int); /// void d(volatile int*); /// void e(int volatile) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isVolatileQualified()))) /// matches "void b(int volatile)", "void c(volatile int)" and /// "void e(int volatile) {}". It does not match d as there /// is no top-level volatile on the parameter type "volatile int *". 
AST_MATCHER(QualType, isVolatileQualified) { return Node.isVolatileQualified(); } /// Matches QualType nodes that have local CV-qualifiers attached to /// the node, not hidden within a typedef. /// /// Given /// \code /// typedef const int const_int; /// const_int i; /// int *const j; /// int *volatile k; /// int m; /// \endcode /// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k. /// \c i is const-qualified but the qualifier is not local. AST_MATCHER(QualType, hasLocalQualifiers) { return Node.hasLocalQualifiers(); } /// Matches a member expression where the member is matched by a /// given matcher. /// /// Given /// \code /// struct { int first, second; } first, second; /// int i(second.first); /// int j(first.second); /// \endcode /// memberExpr(member(hasName("first"))) /// matches second.first /// but not first.second (because the member name there is "second"). AST_MATCHER_P(MemberExpr, member, internal::Matcher<ValueDecl>, InnerMatcher) { return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder); } /// Matches a member expression where the object expression is matched by a /// given matcher. Implicit object expressions are included; that is, it matches /// use of implicit `this`. /// /// Given /// \code /// struct X { /// int m; /// int f(X x) { x.m; return m; } /// }; /// \endcode /// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m`, but not `m`; however, /// memberExpr(hasObjectExpression(hasType(pointsTo( /// cxxRecordDecl(hasName("X")))))) /// matches `m` (aka. `this->m`), but not `x.m`. 
AST_POLYMORPHIC_MATCHER_P( hasObjectExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr, CXXDependentScopeMemberExpr), internal::Matcher<Expr>, InnerMatcher) { if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node)) if (E->isImplicitAccess()) return false; if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node)) if (E->isImplicitAccess()) return false; return InnerMatcher.matches(*Node.getBase(), Finder, Builder); } /// Matches any using shadow declaration. /// /// Given /// \code /// namespace X { void b(); } /// using X::b; /// \endcode /// usingDecl(hasAnyUsingShadowDecl(hasName("b")))) /// matches \code using X::b \endcode AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl, internal::Matcher<UsingShadowDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(), Node.shadow_end(), Finder, Builder); } /// Matches a using shadow declaration where the target declaration is /// matched by the given matcher. /// /// Given /// \code /// namespace X { int a; void b(); } /// using X::a; /// using X::b; /// \endcode /// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl()))) /// matches \code using X::b \endcode /// but not \code using X::a \endcode AST_MATCHER_P(UsingShadowDecl, hasTargetDecl, internal::Matcher<NamedDecl>, InnerMatcher) { return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder); } /// Matches template instantiations of function, class, or static /// member variable template instantiations. /// /// Given /// \code /// template <typename T> class X {}; class A {}; X<A> x; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; template class X<A>; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; extern template class X<A>; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// matches the template instantiation of X<A>. 
/// /// But given /// \code /// template <typename T> class X {}; class A {}; /// template <> class X<A> {}; X<A> x; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// does not match, as X<A> is an explicit template specialization. /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isTemplateInstantiation, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ImplicitInstantiation || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDeclaration); } /// Matches declarations that are template instantiations or are inside /// template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { T i; } /// A(0); /// A(0U); /// \endcode /// functionDecl(isInstantiated()) /// matches 'A(int) {...};' and 'A(unsigned) {...}'. AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) { auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))); return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation))); } /// Matches statements inside of a template instantiation. /// /// Given /// \code /// int j; /// template<typename T> void A(T t) { T i; j += 42;} /// A(0); /// A(0U); /// \endcode /// declStmt(isInTemplateInstantiation()) /// matches 'int i;' and 'unsigned i'. /// unless(stmt(isInTemplateInstantiation())) /// will NOT match j += 42; as it's shared between the template definition and /// instantiation. AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) { return stmt( hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))))); } /// Matches explicit template specializations of function, class, or /// static member variable template instantiations. 
/// /// Given /// \code /// template<typename T> void A(T t) { } /// template<> void A(int N) { } /// \endcode /// functionDecl(isExplicitTemplateSpecialization()) /// matches the specialization A<int>(). /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization); } /// Matches \c TypeLocs for which the given inner /// QualType-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc, internal::Matcher<QualType>, InnerMatcher, 0) { return internal::BindableMatcher<TypeLoc>( new internal::TypeLocTypeMatcher(InnerMatcher)); } /// Matches type \c bool. /// /// Given /// \code /// struct S { bool func(); }; /// \endcode /// functionDecl(returns(booleanType())) /// matches "bool func();" AST_MATCHER(Type, booleanType) { return Node.isBooleanType(); } /// Matches type \c void. /// /// Given /// \code /// struct S { void func(); }; /// \endcode /// functionDecl(returns(voidType())) /// matches "void func();" AST_MATCHER(Type, voidType) { return Node.isVoidType(); } template <typename NodeType> using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>; /// Matches builtin Types. /// /// Given /// \code /// struct A {}; /// A a; /// int b; /// float c; /// bool d; /// \endcode /// builtinType() /// matches "int b", "float c" and "bool d" extern const AstTypeMatcher<BuiltinType> builtinType; /// Matches all kinds of arrays. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[4]; /// void f() { int c[a[0]]; } /// \endcode /// arrayType() /// matches "int a[]", "int b[4]" and "int c[a[0]]"; extern const AstTypeMatcher<ArrayType> arrayType; /// Matches C99 complex types. 
/// /// Given /// \code /// _Complex float f; /// \endcode /// complexType() /// matches "_Complex float f" extern const AstTypeMatcher<ComplexType> complexType; /// Matches any real floating-point type (float, double, long double). /// /// Given /// \code /// int i; /// float f; /// \endcode /// realFloatingPointType() /// matches "float f" but not "int i" AST_MATCHER(Type, realFloatingPointType) { return Node.isRealFloatingType(); } /// Matches arrays and C99 complex types that have a specific element /// type. /// /// Given /// \code /// struct A {}; /// A a[7]; /// int b[7]; /// \endcode /// arrayType(hasElementType(builtinType())) /// matches "int b[7]" /// /// Usable as: Matcher<ArrayType>, Matcher<ComplexType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement, AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType, ComplexType)); /// Matches C arrays with a specified constant size. /// /// Given /// \code /// void() { /// int a[2]; /// int b[] = { 2, 3 }; /// int c[b[0]]; /// } /// \endcode /// constantArrayType() /// matches "int a[2]" extern const AstTypeMatcher<ConstantArrayType> constantArrayType; /// Matches nodes that have the specified size. /// /// Given /// \code /// int a[42]; /// int b[2 * 21]; /// int c[41], d[43]; /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// char *w = "a"; /// \endcode /// constantArrayType(hasSize(42)) /// matches "int a[42]" and "int b[2 * 21]" /// stringLiteral(hasSize(4)) /// matches "abcd", L"abcd" AST_POLYMORPHIC_MATCHER_P(hasSize, AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType, StringLiteral), unsigned, N) { return internal::HasSizeMatcher<NodeType>::hasSize(Node, N); } /// Matches C++ arrays whose size is a value-dependent expression. 
/// /// Given /// \code /// template<typename T, int Size> /// class array { /// T data[Size]; /// }; /// \endcode /// dependentSizedArrayType /// matches "T data[Size]" extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType; /// Matches C arrays with unspecified size. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[42]; /// void f(int c[]) { int d[a[0]]; }; /// \endcode /// incompleteArrayType() /// matches "int a[]" and "int c[]" extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType; /// Matches C arrays with a specified size that is not an /// integer-constant-expression. /// /// Given /// \code /// void f() { /// int a[] = { 2, 3 } /// int b[42]; /// int c[a[0]]; /// } /// \endcode /// variableArrayType() /// matches "int c[a[0]]" extern const AstTypeMatcher<VariableArrayType> variableArrayType; /// Matches \c VariableArrayType nodes that have a specific size /// expression. /// /// Given /// \code /// void f(int b) { /// int a[b]; /// } /// \endcode /// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to( /// varDecl(hasName("b"))))))) /// matches "int a[b]" AST_MATCHER_P(VariableArrayType, hasSizeExpr, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder); } /// Matches atomic types. /// /// Given /// \code /// _Atomic(int) i; /// \endcode /// atomicType() /// matches "_Atomic(int) i" extern const AstTypeMatcher<AtomicType> atomicType; /// Matches atomic types with a specific value type. /// /// Given /// \code /// _Atomic(int) i; /// _Atomic(float) f; /// \endcode /// atomicType(hasValueType(isInteger())) /// matches "_Atomic(int) i" /// /// Usable as: Matcher<AtomicType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue, AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType)); /// Matches types nodes representing C++11 auto types. 
/// /// Given: /// \code /// auto n = 4; /// int v[] = { 2, 3 } /// for (auto i : v) { } /// \endcode /// autoType() /// matches "auto n" and "auto i" extern const AstTypeMatcher<AutoType> autoType; /// Matches types nodes representing C++11 decltype(<expr>) types. /// /// Given: /// \code /// short i = 1; /// int j = 42; /// decltype(i + j) result = i + j; /// \endcode /// decltypeType() /// matches "decltype(i + j)" extern const AstTypeMatcher<DecltypeType> decltypeType; /// Matches \c AutoType nodes where the deduced type is a specific type. /// /// Note: There is no \c TypeLoc for the deduced type and thus no /// \c getDeducedLoc() matcher. /// /// Given /// \code /// auto a = 1; /// auto b = 2.0; /// \endcode /// autoType(hasDeducedType(isInteger())) /// matches "auto a" /// /// Usable as: Matcher<AutoType> AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType, AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType)); /// Matches \c DecltypeType nodes to find out the underlying type. /// /// Given /// \code /// decltype(1) a = 1; /// decltype(2.0) b = 2.0; /// \endcode /// decltypeType(hasUnderlyingType(isInteger())) /// matches the type of "a" /// /// Usable as: Matcher<DecltypeType> AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType, AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType)); /// Matches \c FunctionType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionType() /// matches "int (*f)(int)" and the type of "g". extern const AstTypeMatcher<FunctionType> functionType; /// Matches \c FunctionProtoType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionProtoType() /// matches "int (*f)(int)" and the type of "g" in C++ mode. /// In C mode, "g" is not matched because it does not contain a prototype. extern const AstTypeMatcher<FunctionProtoType> functionProtoType; /// Matches \c ParenType nodes. 
/// /// Given /// \code /// int (*ptr_to_array)[4]; /// int *array_of_ptrs[4]; /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not /// \c array_of_ptrs. extern const AstTypeMatcher<ParenType> parenType; /// Matches \c ParenType nodes where the inner type is a specific type. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int (*ptr_to_func)(int); /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches /// \c ptr_to_func but not \c ptr_to_array. /// /// Usable as: Matcher<ParenType> AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType, AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType)); /// Matches block pointer types, i.e. types syntactically represented as /// "void (^)(int)". /// /// The \c pointee is always required to be a \c FunctionType. extern const AstTypeMatcher<BlockPointerType> blockPointerType; /// Matches member pointer types. /// Given /// \code /// struct A { int i; } /// A::* ptr = A::i; /// \endcode /// memberPointerType() /// matches "A::* ptr" extern const AstTypeMatcher<MemberPointerType> memberPointerType; /// Matches pointer types, but does not match Objective-C object pointer /// types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int c = 5; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "int *a", but does not match "Foo *f". extern const AstTypeMatcher<PointerType> pointerType; /// Matches an Objective-C object pointer type, which is different from /// a pointer type, despite being syntactically similar. /// /// Given /// \code /// int *a; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "Foo *f", but does not match "int *a". extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType; /// Matches both lvalue and rvalue reference types. 
/// /// Given /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f. extern const AstTypeMatcher<ReferenceType> referenceType; /// Matches lvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is /// matched since the type is deduced as int& by reference collapsing rules. extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType; /// Matches rvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not /// matched as it is deduced to int& by reference collapsing rules. extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType; /// Narrows PointerType (and similar) matchers to those where the /// \c pointee matches a given matcher. /// /// Given /// \code /// int *a; /// int const *b; /// float const *f; /// \endcode /// pointerType(pointee(isConstQualified(), isInteger())) /// matches "int const *b" /// /// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>, /// Matcher<PointerType>, Matcher<ReferenceType> AST_TYPELOC_TRAVERSE_MATCHER_DECL( pointee, getPointee, AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType, PointerType, ReferenceType)); /// Matches typedef types. /// /// Given /// \code /// typedef int X; /// \endcode /// typedefType() /// matches "typedef int X" extern const AstTypeMatcher<TypedefType> typedefType; /// Matches enum types. 
/// /// Given /// \code /// enum C { Green }; /// enum class S { Red }; /// /// C c; /// S s; /// \endcode /// /// \c enumType() matches the type of the variable declarations of both \c c and /// \c s. extern const AstTypeMatcher<EnumType> enumType; /// Matches template specialization types. /// /// Given /// \code /// template <typename T> /// class C { }; /// /// template class C<int>; // A /// C<char> var; // B /// \endcode /// /// \c templateSpecializationType() matches the type of the explicit /// instantiation in \c A and the type of the variable declaration in \c B. extern const AstTypeMatcher<TemplateSpecializationType> templateSpecializationType; /// Matches C++17 deduced template specialization types, e.g. deduced class /// template types. /// /// Given /// \code /// template <typename T> /// class C { public: C(T); }; /// /// C c(123); /// \endcode /// \c deducedTemplateSpecializationType() matches the type in the declaration /// of the variable \c c. extern const AstTypeMatcher<DeducedTemplateSpecializationType> deducedTemplateSpecializationType; /// Matches type nodes representing unary type transformations. /// /// Given: /// \code /// typedef __underlying_type(T) type; /// \endcode /// unaryTransformType() /// matches "__underlying_type(T)" extern const AstTypeMatcher<UnaryTransformType> unaryTransformType; /// Matches record types (e.g. structs, classes). /// /// Given /// \code /// class C {}; /// struct S {}; /// /// C c; /// S s; /// \endcode /// /// \c recordType() matches the type of the variable declarations of both \c c /// and \c s. extern const AstTypeMatcher<RecordType> recordType; /// Matches tag types (record and enum types). /// /// Given /// \code /// enum E {}; /// class C {}; /// /// E e; /// C c; /// \endcode /// /// \c tagType() matches the type of the variable declarations of both \c e /// and \c c. 
extern const AstTypeMatcher<TagType> tagType; /// Matches types specified with an elaborated type keyword or with a /// qualified name. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// class C {}; /// /// class C c; /// N::M::D d; /// \endcode /// /// \c elaboratedType() matches the type of the variable declarations of both /// \c c and \c d. extern const AstTypeMatcher<ElaboratedType> elaboratedType; /// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier, /// matches \c InnerMatcher if the qualifier exists. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))) /// matches the type of the variable declaration of \c d. AST_MATCHER_P(ElaboratedType, hasQualifier, internal::Matcher<NestedNameSpecifier>, InnerMatcher) { if (const NestedNameSpecifier *Qualifier = Node.getQualifier()) return InnerMatcher.matches(*Qualifier, Finder, Builder); return false; } /// Matches ElaboratedTypes whose named type matches \c InnerMatcher. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(namesType(recordType( /// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable /// declaration of \c d. AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getNamedType(), Finder, Builder); } /// Matches types that represent the result of substituting a type for a /// template type parameter. 
/// /// Given /// \code /// template <typename T> /// void F(T t) { /// int i = 1 + t; /// } /// \endcode /// /// \c substTemplateTypeParmType() matches the type of 't' but not '1' extern const AstTypeMatcher<SubstTemplateTypeParmType> substTemplateTypeParmType; /// Matches template type parameter substitutions that have a replacement /// type that matches the provided matcher. /// /// Given /// \code /// template <typename T> /// double F(T t); /// int i; /// double j = F(i); /// \endcode /// /// \c substTemplateTypeParmType(hasReplacementType(type())) matches int AST_TYPE_TRAVERSE_MATCHER( hasReplacementType, getReplacementType, AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType)); /// Matches template type parameter types. /// /// Example matches T, but not int. /// (matcher = templateTypeParmType()) /// \code /// template <typename T> void f(int i); /// \endcode extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType; /// Matches injected class name types. /// /// Example matches S s, but not S<T> s. /// (matcher = parmVarDecl(hasType(injectedClassNameType()))) /// \code /// template <typename T> struct S { /// void f(S s); /// void g(S<T> s); /// }; /// \endcode extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType; /// Matches decayed type /// Example matches i[] in declaration of f. /// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType()))))) /// Example matches i[1]. /// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType()))))) /// \code /// void f(int i[]) { /// i[1] = 0; /// } /// \endcode extern const AstTypeMatcher<DecayedType> decayedType; /// Matches the decayed type, whose decayed type matches \c InnerMatcher AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>, InnerType) { return InnerType.matches(Node.getDecayedType(), Finder, Builder); } /// Matches declarations whose declaration context, interpreted as a /// Decl, matches \c InnerMatcher. 
/// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// \endcode /// /// \c cxxRecordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the /// declaration of \c class \c D. AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) { const DeclContext *DC = Node.getDeclContext(); if (!DC) return false; return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder); } /// Matches nested name specifiers. /// /// Given /// \code /// namespace ns { /// struct A { static void f(); }; /// void A::f() {} /// void g() { A::f(); } /// } /// ns::A a; /// \endcode /// nestedNameSpecifier() /// matches "ns::" and both "A::" extern const internal::VariadicAllOfMatcher<NestedNameSpecifier> nestedNameSpecifier; /// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc. extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc> nestedNameSpecifierLoc; /// Matches \c NestedNameSpecifierLocs for which the given inner /// NestedNameSpecifier-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD( internal::BindableMatcher<NestedNameSpecifierLoc>, loc, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) { return internal::BindableMatcher<NestedNameSpecifierLoc>( new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>( InnerMatcher)); } /// Matches nested name specifiers that specify a type matching the /// given \c QualType matcher without qualifiers. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(specifiesType( /// hasDeclaration(cxxRecordDecl(hasName("A"))) /// )) /// matches "A::" AST_MATCHER_P(NestedNameSpecifier, specifiesType, internal::Matcher<QualType>, InnerMatcher) { if (!Node.getAsType()) return false; return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder); } /// Matches nested name specifier locs that specify a type matching the /// given \c TypeLoc. 
/// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type( /// hasDeclaration(cxxRecordDecl(hasName("A"))))))) /// matches "A::" AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc, internal::Matcher<TypeLoc>, InnerMatcher) { return Node && Node.getNestedNameSpecifier()->getAsType() && InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifier. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 0) { const NestedNameSpecifier *NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(*NextNode, Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifierLoc. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A"))))) /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix, internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher, 1) { NestedNameSpecifierLoc NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(NextNode, Finder, Builder); } /// Matches nested name specifiers that specify a namespace matching the /// given namespace matcher. /// /// Given /// \code /// namespace ns { struct A {}; } /// ns::A a; /// \endcode /// nestedNameSpecifier(specifiesNamespace(hasName("ns"))) /// matches "ns::" AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace, internal::Matcher<NamespaceDecl>, InnerMatcher) { if (!Node.getAsNamespace()) return false; return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder); } /// Overloads for the \c equalsNode matcher. 
/// FIXME: Implement for other node types. /// @{ /// Matches if a node equals another node. /// /// \c Decl has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Stmt has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Type has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) { return &Node == Other; } /// @} /// Matches each case or default statement belonging to the given switch /// statement. This matcher may produce multiple matches. /// /// Given /// \code /// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } } /// \endcode /// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s") /// matches four times, with "c" binding each of "case 1:", "case 2:", /// "case 3:" and "case 4:", and "s" respectively binding "switch (1)", /// "switch (1)", "switch (2)" and "switch (2)". AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>, InnerMatcher) { BoundNodesTreeBuilder Result; // FIXME: getSwitchCaseList() does not necessarily guarantee a stable // iteration order. We should use the more general iterating matchers once // they are capable of expressing this matcher (for example, it should ignore // case statements belonging to nested switch statements). bool Matched = false; for (const SwitchCase *SC = Node.getSwitchCaseList(); SC; SC = SC->getNextSwitchCase()) { BoundNodesTreeBuilder CaseBuilder(*Builder); bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder); if (CaseMatched) { Matched = true; Result.addMatch(CaseBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches each constructor initializer in a constructor definition. 
/// /// Given /// \code /// class A { A() : i(42), j(42) {} int i; int j; }; /// \endcode /// cxxConstructorDecl(forEachConstructorInitializer( /// forField(decl().bind("x")) /// )) /// will trigger two matches, binding for 'i' and 'j' respectively. AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *I : Node.inits()) { BoundNodesTreeBuilder InitBuilder(*Builder); if (InnerMatcher.matches(*I, Finder, &InitBuilder)) { Matched = true; Result.addMatch(InitBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches constructor declarations that are copy constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3. AST_MATCHER(CXXConstructorDecl, isCopyConstructor) { return Node.isCopyConstructor(); } /// Matches constructor declarations that are move constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2. AST_MATCHER(CXXConstructorDecl, isMoveConstructor) { return Node.isMoveConstructor(); } /// Matches constructor declarations that are default constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3. AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) { return Node.isDefaultConstructor(); } /// Matches constructors that delegate to another constructor. 
/// /// Given /// \code /// struct S { /// S(); // #1 /// S(int) {} // #2 /// S(S &&) : S() {} // #3 /// }; /// S::S() : S(0) {} // #4 /// \endcode /// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not /// #1 or #2. AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) { return Node.isDelegatingConstructor(); } /// Matches constructor, conversion function, and deduction guide declarations /// that have an explicit specifier if this explicit specifier is resolved to /// true. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9. /// cxxConversionDecl(isExplicit()) will match #4, but not #3. /// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5. AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES( CXXConstructorDecl, CXXConversionDecl, CXXDeductionGuideDecl)) { return Node.isExplicit(); } /// Matches the expression in an explicit specifier if present in the given /// declaration. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2. /// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4. /// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6. 
AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>, InnerMatcher) { ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node); if (!ES.getExpr()) return false; return InnerMatcher.matches(*ES.getExpr(), Finder, Builder); } /// Matches function and namespace declarations that are marked with /// the inline keyword. /// /// Given /// \code /// inline void f(); /// void g(); /// namespace n { /// inline namespace m {} /// } /// \endcode /// functionDecl(isInline()) will match ::f(). /// namespaceDecl(isInline()) will match n::m. AST_POLYMORPHIC_MATCHER(isInline, AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl, FunctionDecl)) { // This is required because the spelling of the function used to determine // whether inline is specified or not differs between the polymorphic types. if (const auto *FD = dyn_cast<FunctionDecl>(&Node)) return FD->isInlineSpecified(); else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node)) return NSD->isInline(); llvm_unreachable("Not a valid polymorphic type"); } /// Matches anonymous namespace declarations. /// /// Given /// \code /// namespace n { /// namespace {} // #1 /// } /// \endcode /// namespaceDecl(isAnonymous()) will match #1 but not ::n. AST_MATCHER(NamespaceDecl, isAnonymous) { return Node.isAnonymousNamespace(); } /// Matches declarations in the namespace `std`, but not in nested namespaces. /// /// Given /// \code /// class vector {}; /// namespace foo { /// class vector {}; /// namespace std { /// class vector {}; /// } /// } /// namespace std { /// inline namespace __1 { /// class vector {}; // #1 /// namespace experimental { /// class vector {}; /// } /// } /// } /// \endcode /// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1. AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); } /// If the given case statement does not use the GNU case range /// extension, matches the constant given in the statement. 
/// /// Given /// \code /// switch (1) { case 1: case 1+1: case 3 ... 4: ; } /// \endcode /// caseStmt(hasCaseConstant(integerLiteral())) /// matches "case 1:" AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>, InnerMatcher) { if (Node.getRHS()) return false; return InnerMatcher.matches(*Node.getLHS(), Finder, Builder); } /// Matches declaration that has a given attribute. /// /// Given /// \code /// __attribute__((device)) void f() { ... } /// \endcode /// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of /// f. If the matcher is used from clang-query, attr::Kind parameter should be /// passed as a quoted string. e.g., hasAttr("attr::CUDADevice"). AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) { for (const auto *Attr : Node.attrs()) { if (Attr->getKind() == AttrKind) return true; } return false; } /// Matches the return value expression of a return statement /// /// Given /// \code /// return a + b; /// \endcode /// hasReturnValue(binaryOperator()) /// matches 'return a + b' /// with binaryOperator() /// matching 'a + b' AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>, InnerMatcher) { if (const auto *RetValue = Node.getRetValue()) return InnerMatcher.matches(*RetValue, Finder, Builder); return false; } /// Matches CUDA kernel call expression. /// /// Example matches, /// \code /// kernel<<<i,j>>>(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr> cudaKernelCallExpr; /// Matches expressions that resolve to a null pointer constant, such as /// GNU's __null, C++11's nullptr, or C's NULL macro. /// /// Given: /// \code /// void *v1 = NULL; /// void *v2 = nullptr; /// void *v3 = __null; // GNU extension /// char *cp = (char *)0; /// int *ip = 0; /// int i = 0; /// \endcode /// expr(nullPointerConstant()) /// matches the initializer for v1, v2, v3, cp, and ip. Does not match the /// initializer for i. 
AST_MATCHER(Expr, nullPointerConstant) {
  return Node.isNullPointerConstant(Finder->getASTContext(),
                                    Expr::NPC_ValueDependentIsNull);
}

/// Matches declaration of the function the statement belongs to
///
/// Given:
/// \code
///   F& operator=(const F& o) {
///     std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
///     return *this;
///   }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
///   matches 'return *this'
///   but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
              InnerMatcher) {
  // Walk upward through the parent chain; a statement may have multiple
  // parents (e.g. in templates), so this is a worklist-based search.
  const auto &Parents = Finder->getASTContext().getParents(Node);

  llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
  while (!Stack.empty()) {
    const auto &CurNode = Stack.back();
    Stack.pop_back();
    if (const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if (InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
        return true;
      }
    } else if (const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      // A lambda body belongs to the lambda's call operator, not to the
      // enclosing function.
      if (InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder,
                               Builder)) {
        return true;
      }
    } else {
      for (const auto &Parent : Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}

/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal linkage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
  return Node.hasExternalFormalLinkage();
}

/// Matches a declaration that has default arguments.
/// /// Example matches y (matcher = parmVarDecl(hasDefaultArgument())) /// \code /// void x(int val) {} /// void y(int val = 0) {} /// \endcode /// /// Deprecated. Use hasInitializer() instead to be able to /// match on the contents of the default argument. For example: /// /// \code /// void x(int val = 7) {} /// void y(int val = 42) {} /// \endcode /// parmVarDecl(hasInitializer(integerLiteral(equals(42)))) /// matches the parameter of y /// /// A matcher such as /// parmVarDecl(hasInitializer(anything())) /// is equivalent to parmVarDecl(hasDefaultArgument()). AST_MATCHER(ParmVarDecl, hasDefaultArgument) { return Node.hasDefaultArg(); } /// Matches array new expressions. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(isArray()) /// matches the expression 'new MyClass[10]'. AST_MATCHER(CXXNewExpr, isArray) { return Node.isArray(); } /// Matches placement new expression arguments. /// /// Given: /// \code /// MyClass *p1 = new (Storage, 16) MyClass(); /// \endcode /// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16)))) /// matches the expression 'new (Storage, 16) MyClass()'. AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index, internal::Matcher<Expr>, InnerMatcher) { return Node.getNumPlacementArgs() > Index && InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder); } /// Matches any placement new expression arguments. /// /// Given: /// \code /// MyClass *p1 = new (Storage) MyClass(); /// \endcode /// cxxNewExpr(hasAnyPlacementArg(anything())) /// matches the expression 'new (Storage, 16) MyClass()'. AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>, InnerMatcher) { return llvm::any_of(Node.placement_arguments(), [&](const Expr *Arg) { return InnerMatcher.matches(*Arg, Finder, Builder); }); } /// Matches array new expressions with a given array size. 
/// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(hasArraySize(integerLiteral(equals(10)))) /// matches the expression 'new MyClass[10]'. AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) { return Node.isArray() && *Node.getArraySize() && InnerMatcher.matches(**Node.getArraySize(), Finder, Builder); } /// Matches a class declaration that is defined. /// /// Example matches x (matcher = cxxRecordDecl(hasDefinition())) /// \code /// class x {}; /// class y; /// \endcode AST_MATCHER(CXXRecordDecl, hasDefinition) { return Node.hasDefinition(); } /// Matches C++11 scoped enum declaration. /// /// Example matches Y (matcher = enumDecl(isScoped())) /// \code /// enum X {}; /// enum class Y {}; /// \endcode AST_MATCHER(EnumDecl, isScoped) { return Node.isScoped(); } /// Matches a function declared with a trailing return type. /// /// Example matches Y (matcher = functionDecl(hasTrailingReturn())) /// \code /// int X() {} /// auto Y() -> int {} /// \endcode AST_MATCHER(FunctionDecl, hasTrailingReturn) { if (const auto *F = Node.getType()->getAs<FunctionProtoType>()) return F->hasTrailingReturn(); return false; } /// Matches expressions that match InnerMatcher that are possibly wrapped in an /// elidable constructor and other corresponding bookkeeping nodes. /// /// In C++17, elidable copy constructors are no longer being generated in the /// AST as it is not permitted by the standard. They are, however, part of the /// AST in C++14 and earlier. So, a matcher must abstract over these differences /// to work in all language modes. This matcher skips elidable constructor-call /// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and /// various implicit nodes inside the constructor calls, all of which will not /// appear in the C++17 AST. 
/// /// Given /// /// \code /// struct H {}; /// H G(); /// void f() { /// H D = G(); /// } /// \endcode /// /// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))`` /// matches ``H D = G()`` in C++11 through C++17 (and beyond). AST_MATCHER_P(Expr, ignoringElidableConstructorCall, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { // E tracks the node that we are examining. const Expr *E = &Node; // If present, remove an outer `ExprWithCleanups` corresponding to the // underlying `CXXConstructExpr`. This check won't cover all cases of added // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the // EWC is placed on the outermost node of the expression, which this may not // be), but, it still improves the coverage of this matcher. if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node)) E = CleanupsExpr->getSubExpr(); if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) { if (CtorExpr->isElidable()) { if (const auto *MaterializeTemp = dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) { return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder, Builder); } } } return InnerMatcher.matches(Node, Finder, Builder); } //----------------------------------------------------------------------------// // OpenMP handling. //----------------------------------------------------------------------------// /// Matches any ``#pragma omp`` executable directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective()`` matches ``omp parallel``, /// ``omp parallel default(none)`` and ``omp taskyield``. extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective> ompExecutableDirective; /// Matches standalone OpenMP directives, /// i.e., directives that can't have a structured block. 
/// /// Given /// /// \code /// #pragma omp parallel /// {} /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective(isStandaloneDirective()))`` matches /// ``omp taskyield``. AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) { return Node.isStandaloneDirective(); } /// Matches the structured-block of the OpenMP executable directive /// /// Prerequisite: the executable directive must not be standalone directive. /// If it is, it will never match. /// /// Given /// /// \code /// #pragma omp parallel /// ; /// #pragma omp parallel /// {} /// \endcode /// /// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;`` AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock, internal::Matcher<Stmt>, InnerMatcher) { if (Node.isStandaloneDirective()) return false; // Standalone directives have no structured blocks. return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder); } /// Matches any clause in an OpenMP directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// \endcode /// /// ``ompExecutableDirective(hasAnyClause(anything()))`` matches /// ``omp parallel default(none)``. AST_MATCHER_P(OMPExecutableDirective, hasAnyClause, internal::Matcher<OMPClause>, InnerMatcher) { ArrayRef<OMPClause *> Clauses = Node.clauses(); return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(), Clauses.end(), Finder, Builder); } /// Matches OpenMP ``default`` clause. /// /// Given /// /// \code /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// #pragma omp parallel /// \endcode /// /// ``ompDefaultClause()`` matches ``default(none)``, ``default(shared)``, and /// ``default(firstprivate)`` extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause> ompDefaultClause; /// Matches if the OpenMP ``default`` clause has ``none`` kind specified. 
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
///   #pragma omp parallel default(shared)
///   #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``.
AST_MATCHER(OMPDefaultClause, isNoneKind) {
  return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_none;
}

/// Matches if the OpenMP ``default`` clause has ``shared`` kind specified.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
///   #pragma omp parallel default(shared)
///   #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``.
AST_MATCHER(OMPDefaultClause, isSharedKind) {
  return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_shared;
}

/// Matches if the OpenMP ``default`` clause has ``firstprivate`` kind
/// specified.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
///   #pragma omp parallel default(shared)
///   #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isFirstPrivateKind())`` matches only
/// ``default(firstprivate)``.
AST_MATCHER(OMPDefaultClause, isFirstPrivateKind) {
  return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_firstprivate;
}

/// Matches if the OpenMP directive is allowed to contain the specified OpenMP
/// clause kind.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel for
///   #pragma omp for
/// \endcode
///
/// ``ompExecutableDirective(isAllowedToContainClauseKind(OMPC_default))``
/// matches ``omp parallel`` and ``omp parallel for``.
///
/// If the matcher is used from clang-query, ``OpenMPClauseKind`` parameter
/// should be passed as a quoted string. e.g.,
/// ``isAllowedToContainClauseKind("OMPC_default").``
AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind,
              OpenMPClauseKind, CKind) {
  return llvm::omp::isAllowedClauseForDirective(
      Node.getDirectiveKind(), CKind,
      Finder->getASTContext().getLangOpts().OpenMP);
}

//----------------------------------------------------------------------------//
// End OpenMP handling.
//----------------------------------------------------------------------------//

} // namespace ast_matchers
} // namespace clang

#endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
GB_unaryop__abs_int8_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_int8_uint16
// op(A') function:  GB_tran__abs_int8_uint16

// C type:   int8_t
// A type:   uint16_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = GB_IABS (aij)

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
#define GB_CASTING(z, x) \
    int8_t z = (int8_t) x ;

// cij = op (cast (aij))
// NOTE: the cast from uint16_t to int8_t is applied BEFORE GB_IABS, so
// values outside the int8_t range are narrowed first and the absolute
// value is then taken of the (possibly negative) narrowed result.
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT8 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_int8_uint16
(
    int8_t *restrict Cx,            // output array, anz entries
    const uint16_t *restrict Ax,    // input array, anz entries
    int64_t anz,                    // number of entries to process
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_int8_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared via textual inclusion; the GB_*
    // macros above configure it for this type/operator combination
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__lnot_bool_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_bool_fp32 // op(A') function: GB_tran__lnot_bool_fp32 // C type: bool // A type: float // cast: bool cij = (bool) aij // unaryop: cij = !aij #define GB_ATYPE \ float #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !x ; // casting #define GB_CASTING(z, aij) \ bool z = (bool) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_bool_fp32 ( bool *Cx, // Cx and Ax may be aliased float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_bool_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unop__identity_int16_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_int16_fc32) // op(A') function: GB (_unop_tran__identity_int16_fc32) // C type: int16_t // A type: GxB_FC32_t // cast: int16_t cij = GB_cast_to_int16_t ((double) crealf (aij)) // unaryop: cij = aij #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int16_t z = GB_cast_to_int16_t ((double) crealf (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int16_t z = GB_cast_to_int16_t ((double) crealf (aij)) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_int16_fc32) ( int16_t *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) 
{ #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; int16_t z = GB_cast_to_int16_t ((double) crealf (aij)) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; int16_t z = GB_cast_to_int16_t ((double) crealf (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int16_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
5.norace1.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 20 int main() { int A[N][N]; #pragma omp parallel for for (int i = 1; i < N; i++) for (int j = 0; j < N; j++) A[i][j] = A[i][j]; } // CHECK: Region is Data Race Free. // END
tensor_utils.h
// BEGINLICENSE
//
// This file is part of helPME, which is distributed under the BSD 3-clause license,
// as described in the LICENSE file in the top level directory of this project.
//
// Author: Andrew C. Simmonett
//
// ENDLICENSE
#ifndef _HELPME_TENSOR_UTILS_H_
#define _HELPME_TENSOR_UTILS_H_

#if HAVE_BLAS == 1
extern "C" {
extern void dgemm_(char *, char *, int *, int *, int *, double *, double *, int *, double *, int *, double *,
                   double *, int *);
extern void sgemm_(char *, char *, int *, int *, int *, float *, float *, int *, float *, int *, float *, float *,
                   int *);
}
#endif

namespace helpme {

/*!
 * \brief Sorts a 3D tensor stored contiguously as ABC into CBA order.
 * \param abcPtr the address of the incoming ABC ordered tensor.
 * \param aDimension the dimension of the A index.
 * \param bDimension the dimension of the B index.
 * \param cDimension the dimension of the C index.
 * \param cbaPtr the address of the outgoing CBA ordered tensor.
 * \param nThreads the number of parallel threads to use.
 */
template <typename Real>
void permuteABCtoCBA(Real const *__restrict__ abcPtr, int const aDimension, int const bDimension,
                     int const cDimension, Real *__restrict__ cbaPtr, size_t nThreads = 1) {
#pragma omp parallel for num_threads(nThreads)
    for (int c = 0; c < cDimension; ++c) {
        for (int b = 0; b < bDimension; ++b) {
            for (int a = 0; a < aDimension; ++a) {
                // Destination runs slowest-to-fastest as C, B, A; source as A, B, C.
                cbaPtr[(c * bDimension + b) * aDimension + a] = abcPtr[(a * bDimension + b) * cDimension + c];
            }
        }
    }
}

/*!
 * \brief Sorts a 3D tensor stored contiguously as ABC into ACB order.
 * \param abcPtr the address of the incoming ABC ordered tensor.
 * \param aDimension the dimension of the A index.
 * \param bDimension the dimension of the B index.
 * \param cDimension the dimension of the C index.
 * \param acbPtr the address of the outgoing ACB ordered tensor.
 * \param nThreads the number of parallel threads to use.
 */
template <typename Real>
void permuteABCtoACB(Real const *__restrict__ abcPtr, int const aDimension, int const bDimension,
                     int const cDimension, Real *__restrict__ acbPtr, size_t nThreads = 1) {
#pragma omp parallel for num_threads(nThreads)
    for (int a = 0; a < aDimension; ++a) {
        for (int c = 0; c < cDimension; ++c) {
            for (int b = 0; b < bDimension; ++b) {
                // Only the two fastest indices swap; the A-major layout is preserved.
                acbPtr[(a * cDimension + c) * bDimension + b] = abcPtr[(a * bDimension + b) * cDimension + c];
            }
        }
    }
}

/*!
 * \brief Contracts an ABxC tensor with a DxC tensor, to produce an ABxD quantity.
 * \param abcPtr the address of the incoming ABxC tensor.
 * \param dcPtr the address of the incoming DxC tensor.
 * \param abDimension the dimension of the AB index.
 * \param cDimension the dimension of the C (contracted) index.
 * \param dDimension the dimension of the D index.
 * \param abdPtr the address of the outgoing ABD tensor.
 */
template <typename Real>
void contractABxCWithDxC(Real const *__restrict__ abcPtr, Real const *__restrict__ dcPtr, int const abDimension,
                         int const cDimension, int const dDimension, Real *__restrict__ abdPtr) {
    for (int ab = 0; ab < abDimension; ++ab) {
        Real const *abRow = abcPtr + cDimension * ab;
        for (int d = 0; d < dDimension; ++d) {
            Real const *dRow = dcPtr + cDimension * d;
            // Dot product over the shared C index.
            Real dot = 0;
            for (int c = 0; c < cDimension; ++c) {
                dot += abRow[c] * dRow[c];
            }
            abdPtr[dDimension * ab + d] = dot;
        }
    }
}

#if HAVE_BLAS == 1
/*!
 * \brief Single-precision specialization of contractABxCWithDxC, dispatching to BLAS.
 *        Row-major (ABxC)(DxC)^T is computed as a column-major GEMM with the
 *        operands swapped and the second operand transposed.
 */
template <>
void contractABxCWithDxC<float>(float const *__restrict__ abcPtr, float const *__restrict__ dcPtr,
                                int const abDimension, int const cDimension, int const dDimension,
                                float *__restrict__ abdPtr) {
    // BLAS requires strictly positive dimensions; an empty extent means no work.
    if (abDimension == 0 || cDimension == 0 || dDimension == 0) return;
    char opFirst = 't';
    char opSecond = 'n';
    float one = 1;
    float zero = 0;
    sgemm_(&opFirst, &opSecond, const_cast<int *>(&dDimension), const_cast<int *>(&abDimension),
           const_cast<int *>(&cDimension), &one, const_cast<float *>(dcPtr), const_cast<int *>(&cDimension),
           const_cast<float *>(abcPtr), const_cast<int *>(&cDimension), &zero, abdPtr,
           const_cast<int *>(&dDimension));
}

/*!
 * \brief Double-precision specialization of contractABxCWithDxC, dispatching to BLAS.
 */
template <>
void contractABxCWithDxC<double>(double const *__restrict__ abcPtr, double const *__restrict__ dcPtr,
                                 int const abDimension, int const cDimension, int const dDimension,
                                 double *__restrict__ abdPtr) {
    if (abDimension == 0 || cDimension == 0 || dDimension == 0) return;
    char opFirst = 't';
    char opSecond = 'n';
    double one = 1;
    double zero = 0;
    dgemm_(&opFirst, &opSecond, const_cast<int *>(&dDimension), const_cast<int *>(&abDimension),
           const_cast<int *>(&cDimension), &one, const_cast<double *>(dcPtr), const_cast<int *>(&cDimension),
           const_cast<double *>(abcPtr), const_cast<int *>(&cDimension), &zero, abdPtr,
           const_cast<int *>(&dDimension));
}
#endif

}  // Namespace helpme
#endif  // Header guard
QuadNode.h
/*
 * QuadNode.h
 *
 *  Created on: 21.05.2014
 *      Author: Moritz v. Looz (moritz.looz-corswarem@kit.edu)
 */

#ifndef QUADNODE_H_
#define QUADNODE_H_

#include <vector>
#include <algorithm>
#include <functional>
#include <assert.h>
#include "../../auxiliary/Log.h"
#include "../../auxiliary/Parallel.h"
#include "../../geometric/HyperbolicSpace.h"

using std::vector;
using std::min;
using std::max;
using std::cos;

namespace NetworKit {

// One cell of a polar quadtree over the hyperbolic plane.  A node covers the
// annulus sector [leftAngle, rightAngle) x [minR, maxR); leaves store points
// directly, inner nodes delegate to exactly four children.
// NOTE(review): `count` and `index` are NetworKit-wide integer typedefs; their
// exact widths are defined elsewhere in the project.
template <class T, bool poincare = true>
class QuadNode {
    friend class QuadTreeGTest;
private:
    double leftAngle;    // minimal angular coordinate of this cell (inclusive)
    double minR;         // minimal radial coordinate of this cell (inclusive)
    double rightAngle;   // maximal angular coordinate of this cell (exclusive)
    double maxR;         // maximal radial coordinate of this cell (exclusive)
    Point2D<double> a,b,c,d;   // Cartesian positions of the four cell corners
    unsigned capacity;         // leaf splits once it would exceed this many points
    static const unsigned coarsenLimit = 4;
    static const long unsigned sanityNodeLimit = 10E15; //just assuming, for debug purposes, that this algorithm never runs on machines with more than 4 Petabyte RAM
    count subTreeSize;         // number of points below this node (inner nodes only)
    // parallel per-point arrays: content[i] lives at (angles[i], radii[i]) == positions[i]
    std::vector<T> content;
    std::vector<Point2D<double> > positions;
    std::vector<double> angles;
    std::vector<double> radii;
    bool isLeaf;
    bool splitTheoretical;     // split radii analytically instead of heuristically
    double alpha;              // dispersion parameter of the point distribution
    double balance;            // target fraction of mass in the inner child ring
    index ID;
    double lowerBoundR;        // smallest radius of any point inserted so far

public:
    std::vector<QuadNode> children;

    // Default constructor: produces a degenerate, empty cell.
    QuadNode() {
        //This should never be called.
        leftAngle = 0;
        rightAngle = 0;
        minR = 0;
        maxR = 0;
        capacity = 20;
        isLeaf = true;
        subTreeSize = 0;
        balance = 0.5;
        splitTheoretical = false;
        alpha = 1;
        lowerBoundR = maxR;
        ID = 0;
    }

    /**
     * Construct a QuadNode for polar coordinates.
     *
     * @param leftAngle Minimal angular coordinate of region, in radians from 0 to 2\pi
     * @param minR Minimal radial coordinate of region, between 0 and 1
     * @param rightAngle Maximal angular coordinate of region, in radians from 0 to 2\pi
     * @param maxR Maximal radial coordinate of region, between 0 and 1
     * @param capacity Number of points a leaf cell can store before splitting
     * @param splitTheoretical Whether to split in a theoretically optimal way or in a way to decrease measured running times
     * @param alpha dispersion Parameter of the point distribution. Only has an effect if theoretical split is true
     * @param balance Target balance between children; must lie strictly between 0 and 1
     *
     * @throws std::runtime_error if balance is out of range, or if poincare
     *         coordinates are used with maxR > 1 (outside the unit disk)
     */
    QuadNode(double leftAngle, double minR, double rightAngle, double maxR, unsigned capacity = 1000, bool splitTheoretical = false, double alpha = 1, double balance = 0.5) {
        if (balance <= 0 || balance >= 1) throw std::runtime_error("Quadtree balance parameter must be between 0 and 1.");
        if (poincare && maxR > 1) throw std::runtime_error("The Poincare disk has a radius of 1, cannot create quadtree larger than that!");
        this->leftAngle = leftAngle;
        this->minR = minR;
        this->maxR = maxR;
        this->rightAngle = rightAngle;
        // cache the Cartesian corner points for fast Euclidean distance checks
        this->a = HyperbolicSpace::polarToCartesian(leftAngle, minR);
        this->b = HyperbolicSpace::polarToCartesian(rightAngle, minR);
        this->c = HyperbolicSpace::polarToCartesian(rightAngle, maxR);
        this->d = HyperbolicSpace::polarToCartesian(leftAngle, maxR);
        this->capacity = capacity;
        this->alpha = alpha;
        this->splitTheoretical = splitTheoretical;
        this->balance = balance;
        this->lowerBoundR = maxR;
        this->ID = 0;
        isLeaf = true;
        subTreeSize = 0;
    }

    // Split this leaf into four children (SW, SE, NW, NE) along the angular
    // midpoint and a radius chosen so the children carry balanced expected mass.
    void split() {
        assert(isLeaf);
        //heavy lifting: split up!
        double middleAngle = (rightAngle - leftAngle) / 2 + leftAngle;
        /**
         * we want to make sure the space is evenly divided to obtain a balanced tree
         * Simply halving the radius will cause a larger space for the outer Quadnode, resulting in an unbalanced tree
         */
        double middleR;
        if (poincare) {
            if (splitTheoretical) {
                // split so that the expected number of points (density ~ cosh(alpha*r))
                // is divided `balance` : (1 - balance) between inner and outer child
                double hyperbolicOuter = HyperbolicSpace::EuclideanRadiusToHyperbolic(maxR);
                double hyperbolicInner = HyperbolicSpace::EuclideanRadiusToHyperbolic(minR);
                double hyperbolicMiddle = acosh((1-balance)*cosh(alpha*hyperbolicOuter) + balance*cosh(alpha*hyperbolicInner))/alpha;
                middleR = HyperbolicSpace::hyperbolicRadiusToEuclidean(hyperbolicMiddle);
            } else {
                // empirical split rule tuned for measured running time
                double nom = maxR - minR;
                double denom = pow((1-maxR*maxR)/(1-minR*minR), 0.5)+1;
                middleR = nom/denom + minR;
            }
        } else {
            middleR = acosh((1-balance)*cosh(alpha*maxR) + balance*cosh(alpha*minR))/alpha;
        }

        //one could also use the median here. Results in worse asymptotical complexity, but maybe better runtime?

        assert(middleR < maxR);
        assert(middleR > minR);

        QuadNode<index,poincare> southwest(leftAngle, minR, middleAngle, middleR, capacity, splitTheoretical, alpha, balance);
        QuadNode<index,poincare> southeast(middleAngle, minR, rightAngle, middleR, capacity, splitTheoretical, alpha, balance);
        QuadNode<index,poincare> northwest(leftAngle, middleR, middleAngle, maxR, capacity, splitTheoretical, alpha, balance);
        QuadNode<index,poincare> northeast(middleAngle, middleR, rightAngle, maxR, capacity, splitTheoretical, alpha, balance);
        children = {southwest, southeast, northwest, northeast};
        isLeaf = false;
    }

    /**
     * Add a point at polar coordinates (angle, R) with content input. May split node if capacity is full
     *
     * @param input arbitrary content, in our case an index
     * @param angle angular coordinate of point, between 0 and 2 pi.
     * @param R radial coordinate of point, between 0 and 1.
     */
    void addContent(T input, double angle, double R) {
        assert(input < sanityNodeLimit);
        assert(this->responsible(angle, R));
        if (lowerBoundR > R) lowerBoundR = R;
        if (isLeaf) {
            // NOTE(review): `size() + 1 < capacity` splits one element before the
            // stated capacity is actually reached — presumably intentional slack.
            if (content.size() + 1 < capacity) {
                content.push_back(input);
                angles.push_back(angle);
                radii.push_back(R);
                Point2D<double> pos = HyperbolicSpace::polarToCartesian(angle, R);
                positions.push_back(pos);
            } else {
                // over capacity: split and push the stored points down to the children
                split();
                for (index i = 0; i < content.size(); i++) {
                    this->addContent(content[i], angles[i], radii[i]);
                }
                assert(subTreeSize == content.size());//we have added everything twice
                subTreeSize = content.size();
                content.clear();
                angles.clear();
                radii.clear();
                positions.clear();
                this->addContent(input, angle, R);
            }
        } else {
            assert(children.size() > 0);
            // exactly one child is responsible for the coordinates
            for (index i = 0; i < children.size(); i++) {
                if (children[i].responsible(angle, R)) {
                    children[i].addContent(input, angle, R);
                    break;
                }
            }
            subTreeSize++;
        }
    }

    /**
     * Remove content at polar coordinates (angle, R). May cause coarsening of the quadtree
     *
     * @param input Content to be removed
     * @param angle Angular coordinate
     * @param R Radial coordinate
     *
     * @return True if content was found and removed, false otherwise
     */
    bool removeContent(T input, double angle, double R) {
        if (!responsible(angle, R)) return false;
        if (isLeaf) {
            // linear scan for the element in this leaf
            index i = 0;
            for (; i < content.size(); i++) {
                if (content[i] == input) break;
            }
            if (i < content.size()) {
                assert(angles[i] == angle);
                assert(radii[i] == R);
                //remove element
                content.erase(content.begin()+i);
                positions.erase(positions.begin()+i);
                angles.erase(angles.begin()+i);
                radii.erase(radii.begin()+i);
                return true;
            } else {
                return false;
            }
        } else {
            bool removed = false;
            bool allLeaves = true;
            assert(children.size() > 0);
            for (index i = 0; i < children.size(); i++) {
                if (!children[i].isLeaf) allLeaves = false;
                if (children[i].removeContent(input, angle, R)) {
                    assert(!removed);
                    removed = true;
                }
            }
            if (removed) subTreeSize--;
            //coarsen?
            if (removed && allLeaves && size() < coarsenLimit) {
                //coarsen!!
                //why not assert empty containers and then insert directly?
                // pull all remaining points out of the (leaf) children, then
                // collapse this node back into a leaf
                vector<T> allContent;
                vector<Point2D<double> > allPositions;
                vector<double> allAngles;
                vector<double> allRadii;
                for (index i = 0; i < children.size(); i++) {
                    allContent.insert(allContent.end(), children[i].content.begin(), children[i].content.end());
                    allPositions.insert(allPositions.end(), children[i].positions.begin(), children[i].positions.end());
                    allAngles.insert(allAngles.end(), children[i].angles.begin(), children[i].angles.end());
                    allRadii.insert(allRadii.end(), children[i].radii.begin(), children[i].radii.end());
                }
                assert(subTreeSize == allContent.size());
                assert(subTreeSize == allPositions.size());
                assert(subTreeSize == allAngles.size());
                assert(subTreeSize == allRadii.size());
                children.clear();
                content.swap(allContent);
                positions.swap(allPositions);
                angles.swap(allAngles);
                radii.swap(allRadii);
                isLeaf = true;
            }
            return removed;
        }
    }

    /**
     * Check whether the region managed by this node lies outside of an Euclidean circle.
     *
     * @param query Center of the Euclidean query circle, given in Cartesian coordinates
     * @param radius Radius of the Euclidean query circle
     *
     * @return True if the region managed by this node lies completely outside of the circle
     */
    bool outOfReach(Point2D<double> query, double radius) const {
        double phi, r;
        HyperbolicSpace::cartesianToPolar(query, phi, r);
        if (responsible(phi, r)) return false;

        //if using native coordinates, call distance calculation
        if (!poincare) return hyperbolicDistances(phi, r).first > radius;

        //get four edge points
        double topDistance, bottomDistance, leftDistance, rightDistance;

        // outer arc: either the nearest outer corner or the radial gap
        if (phi < leftAngle || phi > rightAngle) {
            topDistance = min(c.distance(query), d.distance(query));
        } else {
            topDistance = abs(r - maxR);
        }
        if (topDistance <= radius) return false;
        // inner arc
        if (phi < leftAngle || phi > rightAngle) {
            bottomDistance = min(a.distance(query), b.distance(query));
        } else {
            bottomDistance = abs(r - minR);
        }
        if (bottomDistance <= radius) return false;

        // left radial border: foot of the perpendicular, if it lies on the segment
        double minDistanceR = r*cos(abs(phi-leftAngle));
        if (minDistanceR > minR && minDistanceR < maxR) {
            leftDistance = query.distance(HyperbolicSpace::polarToCartesian(phi, minDistanceR));
        } else {
            leftDistance = min(a.distance(query), d.distance(query));
        }
        if (leftDistance <= radius) return false;

        // right radial border
        minDistanceR = r*cos(abs(phi-rightAngle));
        if (minDistanceR > minR && minDistanceR < maxR) {
            rightDistance = query.distance(HyperbolicSpace::polarToCartesian(phi, minDistanceR));
        } else {
            rightDistance = min(b.distance(query), c.distance(query));
        }
        if (rightDistance <= radius) return false;
        return true;
    }

    /**
     * Check whether the region managed by this node lies outside of an Euclidean circle.
     * Functionality is the same as in the method above, but it takes polar coordinates instead of Cartesian ones
     *
     * @param angle_c Angular coordinate of the Euclidean query circle's center
     * @param r_c Radial coordinate of the Euclidean query circle's center
     * @param radius Radius of the Euclidean query circle
     *
     * @return True if the region managed by this node lies completely outside of the circle
     */
    bool outOfReach(double angle_c, double r_c, double radius) const {
        if (responsible(angle_c, r_c)) return false;
        Point2D<double> query = HyperbolicSpace::polarToCartesian(angle_c, r_c);
        return outOfReach(query, radius);
    }

    /**
     * Compute lower and upper bounds on the hyperbolic distance between the
     * query point and any point of this cell, working entirely with cosh of
     * distances (via the hyperbolic law of cosines) and converting back with
     * acosh only once at the end.
     *
     * @param phi Angular coordinate of query point
     * @param r radial coordinate of query point (in the Poincare disk if
     *          poincare == true, native hyperbolic radius otherwise)
     *
     * @return pair (minDistance, maxDistance) of hyperbolic distances
     */
    std::pair<double, double> hyperbolicDistances(double phi, double r) const {
        double minRHyper, maxRHyper, r_h;
        if (poincare) {
            minRHyper=HyperbolicSpace::EuclideanRadiusToHyperbolic(this->minR);
            maxRHyper=HyperbolicSpace::EuclideanRadiusToHyperbolic(this->maxR);
            r_h = HyperbolicSpace::EuclideanRadiusToHyperbolic(r);
        } else {
            minRHyper=this->minR;
            maxRHyper=this->maxR;
            r_h = r;
        }

        // precompute the hyperbolic trig terms reused for every border
        double coshr = cosh(r_h);
        double sinhr = sinh(r_h);
        double coshMinR = cosh(minRHyper);
        double coshMaxR = cosh(maxRHyper);
        double sinhMinR = sinh(minRHyper);
        double sinhMaxR = sinh(maxRHyper);
        double cosDiffLeft = cos(phi - leftAngle);
        double cosDiffRight = cos(phi - rightAngle);

        /**
         * If the query point is not within the quadnode, the distance minimum is on the border.
         * Need to check whether extremum is between corners:
         */
        double coshMinDistance, coshMaxDistance;

        //Left border
        double lowerLeftDistance = coshMinR*coshr-sinhMinR*sinhr*cosDiffLeft;
        double upperLeftDistance = coshMaxR*coshr-sinhMaxR*sinhr*cosDiffLeft;
        if (responsible(phi, r)) coshMinDistance = 1; //strictly speaking, this is wrong
        else coshMinDistance = min(lowerLeftDistance, upperLeftDistance);

        coshMaxDistance = max(lowerLeftDistance, upperLeftDistance);
        //double a = cosh(r_h);
        double b = sinhr*cosDiffLeft;
        // radius at which the distance to the left border is extremal
        double extremum = log((coshr+b)/(coshr-b))/2;
        if (extremum < maxRHyper && extremum >= minRHyper) {
            double extremeDistance = cosh(extremum)*coshr-sinh(extremum)*sinhr*cosDiffLeft;
            coshMinDistance = min(coshMinDistance, extremeDistance);
            coshMaxDistance = max(coshMaxDistance, extremeDistance);
        }

        /**
         * cosh is a function from [0,\infty) to [1, \infty)
         * Variables thus need
         */
        assert(coshMaxDistance >= 1);
        assert(coshMinDistance >= 1);

        //Right border
        double lowerRightDistance = coshMinR*coshr-sinhMinR*sinhr*cosDiffRight;
        double upperRightDistance = coshMaxR*coshr-sinhMaxR*sinhr*cosDiffRight;
        coshMinDistance = min(coshMinDistance, lowerRightDistance);
        coshMinDistance = min(coshMinDistance, upperRightDistance);
        coshMaxDistance = max(coshMaxDistance, lowerRightDistance);
        coshMaxDistance = max(coshMaxDistance, upperRightDistance);

        b = sinhr*cosDiffRight;
        extremum = log((coshr+b)/(coshr-b))/2;
        if (extremum < maxRHyper && extremum >= minRHyper) {
            double extremeDistance = cosh(extremum)*coshr-sinh(extremum)*sinhr*cosDiffRight;
            coshMinDistance = min(coshMinDistance, extremeDistance);
            coshMaxDistance = max(coshMaxDistance, extremeDistance);
        }

        assert(coshMaxDistance >= 1);
        assert(coshMinDistance >= 1);

        //upper and lower borders
        if (phi >= leftAngle && phi < rightAngle) {
            double lower = cosh(abs(r_h-minRHyper));
            double upper = cosh(abs(r_h-maxRHyper));
            coshMinDistance = min(coshMinDistance, lower);
            coshMinDistance = min(coshMinDistance, upper);
            coshMaxDistance = max(coshMaxDistance, upper);
            coshMaxDistance = max(coshMaxDistance, lower);
        }

        assert(coshMaxDistance >= 1);
        assert(coshMinDistance >= 1);

        //again with mirrored phi
        double mirrorphi;
        if (phi >= M_PI) mirrorphi = phi - M_PI;
        else mirrorphi = phi + M_PI;
        if (mirrorphi >= leftAngle && mirrorphi < rightAngle) {
            double lower = coshMinR*coshr+sinhMinR*sinhr;
            double upper = coshMaxR*coshr+sinhMaxR*sinhr;
            coshMinDistance = min(coshMinDistance, lower);
            coshMinDistance = min(coshMinDistance, upper);
            coshMaxDistance = max(coshMaxDistance, upper);
            coshMaxDistance = max(coshMaxDistance, lower);
        }

        assert(coshMaxDistance >= 1);
        assert(coshMinDistance >= 1);

        double minDistance, maxDistance;
        minDistance = acosh(coshMinDistance);
        maxDistance = acosh(coshMaxDistance);

        assert(maxDistance >= 0);
        assert(minDistance >= 0);
        return std::pair<double, double>(minDistance, maxDistance);
    }

    /**
     * Does the point at (angle, r) fall inside the region managed by this QuadNode?
     *
     * @param angle Angular coordinate of input point
     * @param r Radial coordinate of input points
     *
     * @return True if input point lies within the region of this QuadNode
     */
    bool responsible(double angle, double r) const {
        // half-open intervals: left/inner bounds inclusive, right/outer exclusive
        return (angle >= leftAngle && angle < rightAngle && r >= minR && r < maxR);
    }

    /**
     * Get all Elements in this QuadNode or a descendant of it
     *
     * @return vector of content type T
     */
    std::vector<T> getElements() const {
        if (isLeaf) {
            return content;
        } else {
            // an inner node holds no points of its own
            assert(content.size() == 0);
            assert(angles.size() == 0);
            assert(radii.size() == 0);
            vector<T> result;
            for (index i = 0; i < children.size(); i++) {
                std::vector<T> subresult = children[i].getElements();
                result.insert(result.end(), subresult.begin(), subresult.end());
            }
            return result;
        }
    }

    // Append the polar coordinates of all points in this subtree to the given
    // containers; angles[i] and radii[i] stay aligned with getElements() order.
    void getCoordinates(vector<double> &anglesContainer, vector<double> &radiiContainer) const {
        assert(angles.size() == radii.size());
        if (isLeaf) {
            anglesContainer.insert(anglesContainer.end(), angles.begin(), angles.end());
            radiiContainer.insert(radiiContainer.end(), radii.begin(), radii.end());
        } else {
            assert(content.size() == 0);
            assert(angles.size() == 0);
            assert(radii.size() == 0);
            for (index i = 0; i < children.size(); i++) {
                children[i].getCoordinates(anglesContainer, radiiContainer);
            }
        }
    }

    /**
     * Don't use this!
     * Code is still in here for a unit test.
     *
     * Get copy of the leaf cell responsible for a point at (angle, r).
     * Expensive because it copies the whole subtree, causes assertion failure if called with the wrong arguments
     *
     * @param angle Angular coordinate of point
     * @param r Radial coordinate of point
     *
     * @return Copy of leaf cell containing point, or dummy cell not responsible for point
     *
     */
    QuadNode<T>& getAppropriateLeaf(double angle, double r) {
        assert(this->responsible(angle, r));
        if (isLeaf) return *this;//will this return the reference to the subtree itself or to a copy?
        else {
            for (index i = 0; i < children.size(); i++) {
                // NOTE(review): foundResponsibleChild is re-initialized every
                // iteration, so the assert below can never fire — it does not
                // actually check uniqueness across children.
                bool foundResponsibleChild = false;
                if (children[i].responsible(angle, r)) {
                    assert(foundResponsibleChild == false);
                    foundResponsibleChild = true;
                    return children[i].getAppropriateLeaf(angle, r);
                }
            }
            throw std::runtime_error("No responsible child found.");
        }
    }

    /**
     * Main query method, get points lying in a Euclidean circle around the center point.
     * Optional limits can be given to get a different result or to reduce unnecessary comparisons
     *
     * Elements are pushed onto a vector which is a required argument. This is done to reduce copying
     *
     * Safe to call in parallel if diagnostics are disabled
     *
     * @param center Center of the query circle
     * @param radius Radius of the query circle
     * @param result Reference to the vector where the results will be stored
     * @param minAngle Optional value for the minimum angular coordinate of the query region
     * @param maxAngle Optional value for the maximum angular coordinate of the query region
     * @param lowR Optional value for the minimum radial coordinate of the query region
     * @param highR Optional value for the maximum radial coordinate of the query region
     */
    void getElementsInEuclideanCircle(Point2D<double> center, double radius, vector<T> &result, double minAngle=0, double maxAngle=2*M_PI, double lowR=0, double highR = 1) const {
        if (!poincare) throw std::runtime_error("Euclidean query circles not yet implemented for native hyperbolic coordinates.");
        // prune: query window disjoint from this cell
        if (minAngle >= rightAngle || maxAngle <= leftAngle || lowR >= maxR || highR < lowerBoundR) return;
        // prune: whole cell provably outside the circle
        if (outOfReach(center, radius)) {
            return;
        }

        if (isLeaf) {
            // compare squared distances to avoid sqrt in the inner loop
            const double rsq = radius*radius;
            const double queryX = center[0];
            const double queryY = center[1];
            const count cSize = content.size();

            for (index i = 0; i < cSize; i++) {
                const double deltaX = positions[i].getX() - queryX;
                const double deltaY = positions[i].getY() - queryY;
                if (deltaX*deltaX + deltaY*deltaY < rsq) {
                    result.push_back(content[i]);
                    if (content[i] >= sanityNodeLimit) DEBUG("Quadnode content ", content[i], " found, suspiciously high!");
                    assert(content[i] < sanityNodeLimit);
                }
            }
        } else {
            for (index i = 0; i < children.size(); i++) {
                children[i].getElementsInEuclideanCircle(center, radius, result, minAngle, maxAngle, lowR, highR);
            }
        }
    }

    /**
     * Sample neighbors probabilistically: each point at hyperbolic distance d
     * from euQuery is accepted with probability prob(d).  Uses geometric
     * "jumps" over the candidate list so that on average only accepted (and a
     * bounded number of rejected) candidates are visited.  Randomized — not
     * deterministic between runs.
     *
     * @param euQuery query point in Cartesian coordinates
     * @param prob monotonically decreasing acceptance probability over distance
     * @param suppressLeft skip this subtree if the query angle exceeds rightAngle
     * @param result output vector accepted elements are appended to
     *
     * @return number of candidates actually tested
     */
    count getElementsProbabilistically(Point2D<double> euQuery, std::function<double(double)> prob, bool suppressLeft, vector<T> &result) const {
        double phi_q, r_q;
        HyperbolicSpace::cartesianToPolar(euQuery, phi_q, r_q);
        if (suppressLeft && phi_q > rightAngle) return 0;
        TRACE("Getting hyperbolic distances");
        auto distancePair = hyperbolicDistances(phi_q, r_q);
        double probUB = prob(distancePair.first);
        double probLB = prob(distancePair.second);
        assert(probLB <= probUB);
        if (probUB > 0.5) probUB = 1;//if we are going to take every second element anyway, no use in calculating expensive jumps
        if (probUB == 0) return 0;
        //TODO: return whole if probLB == 1
        double probdenom = std::log(1-probUB);
        if (probdenom == 0) {
            DEBUG(probUB, " not zero, but too small too process. Ignoring.");
            return 0;
        }
        TRACE("probUB: ", probUB, ", probdenom: ", probdenom);

        count expectedNeighbours = probUB*size();
        count candidatesTested = 0;

        if (isLeaf) {
            const count lsize = content.size();
            TRACE("Leaf of size ", lsize);
            for (index i = 0; i < lsize; i++) {
                //jump!
                if (probUB < 1) {
                    // geometric skip: advance past elements that would all be rejected
                    double random = Aux::Random::real();
                    double delta = std::log(random) / probdenom;
                    assert(delta == delta);
                    assert(delta >= 0);
                    i += delta;
                    if (i >= lsize) break;
                    TRACE("Jumped with delta ", delta, " arrived at ", i);
                }

                //see where we've arrived
                candidatesTested++;
                double distance;
                if (poincare) {
                    distance = HyperbolicSpace::poincareMetric(positions[i], euQuery);
                } else {
                    distance = HyperbolicSpace::nativeDistance(angles[i], radii[i], phi_q, r_q);
                }
                assert(distance >= distancePair.first);

                double q = prob(distance);
                q = q / probUB; //since the candidate was selected by the jumping process, we have to adjust the probabilities
                assert(q <= 1);
                assert(q >= 0);

                //accept?
                double acc = Aux::Random::real();
                if (acc < q) {
                    TRACE("Accepted node ", i, " with probability ", q, ".");
                    result.push_back(content[i]);
                }
            }
        } else {
            if (expectedNeighbours < 1) {//select candidates directly instead of calling recursively
                TRACE("probUB = ", probUB, ", switching to direct candidate selection.");
                assert(probUB < 1);
                const count stsize = size();
                for (index i = 0; i < stsize; i++) {
                    double delta = std::log(Aux::Random::real()) / probdenom;
                    assert(delta >= 0);
                    i += delta;
                    TRACE("Jumped with delta ", delta, " arrived at ", i, ". Calling maybeGetKthElement.");
                    if (i < size()) maybeGetKthElement(probUB, euQuery, prob, i, result);//this could be optimized. As of now, the offset is subtracted separately for each point
                    else break;
                    candidatesTested++;
                }
            } else {//carry on as normal
                for (index i = 0; i < children.size(); i++) {
                    TRACE("Recursively calling child ", i);
                    candidatesTested += children[i].getElementsProbabilistically(euQuery, prob, suppressLeft, result);
                }
            }
        }
        //DEBUG("Expected at most ", expectedNeighbours, " neighbours, got ", result.size() - offset);
        return candidatesTested;
    }

    /**
     * Locate the k-th element (in depth-first order) of this subtree and
     * accept it with probability prob(distance)/upperBound, appending it to
     * circleDenizens on success.  Helper for the direct-selection branch of
     * getElementsProbabilistically.
     */
    void maybeGetKthElement(double upperBound, Point2D<double> euQuery, std::function<double(double)> prob, index k, vector<T> &circleDenizens) const {
        TRACE("Maybe get element ", k, " with upper Bound ", upperBound);
        assert(k < size());
        if (isLeaf) {
            double distance;
            if (poincare) {
                distance = HyperbolicSpace::poincareMetric(positions[k], euQuery);
            } else {
                double phi_q, r_q;
                HyperbolicSpace::cartesianToPolar(euQuery, phi_q, r_q);
                distance = HyperbolicSpace::nativeDistance(angles[k], radii[k], phi_q, r_q);
            }
            double acceptance = prob(distance)/upperBound;
            TRACE("Is leaf, accept with ", acceptance);
            if (Aux::Random::real() < acceptance) circleDenizens.push_back(content[k]);
        } else {
            TRACE("Call recursively.");
            // descend into the child that contains the k-th element overall
            index offset = 0;
            for (index i = 0; i < children.size(); i++) {
                count childsize = children[i].size();
                if (k - offset < childsize) {
                    children[i].maybeGetKthElement(upperBound, euQuery, prob, k - offset, circleDenizens);
                    break;
                }
                offset += childsize;
            }
        }
    }

    /**
     * Shrink all vectors in this subtree to fit the content.
     * Call after quadtree construction is complete, causes better memory usage and cache efficiency
     */
    void trim() {
        content.shrink_to_fit();
        positions.shrink_to_fit();
        angles.shrink_to_fit();
        radii.shrink_to_fit();
        if (!isLeaf) {
            for (index i = 0; i < children.size(); i++) {
                children[i].trim();
            }
        }
    }

    /**
     * Number of points lying in the region managed by this QuadNode
     */
    count size() const {
        return isLeaf ? content.size() : subTreeSize;
    }

    // Recompute subTreeSize bottom-up from the children's sizes.
    void recount() {
        subTreeSize = 0;
        for (index i = 0; i < children.size(); i++) {
            children[i].recount();
            subTreeSize += children[i].size();
        }
    }

    /**
     * Height of subtree hanging from this QuadNode
     */
    count height() const {
        count result = 1;//if leaf node, the children loop will not execute
        for (auto child : children) result = std::max(result, child.height()+1);
        return result;
    }

    /**
     * Leaf cells in the subtree hanging from this QuadNode
     */
    count countLeaves() const {
        if (isLeaf) return 1;
        count result = 0;
        for (index i = 0; i < children.size(); i++) {
            result += children[i].countLeaves();
        }
        return result;
    }

    // Accessors for the cell boundaries and identifier.
    double getLeftAngle() const {
        return leftAngle;
    }

    double getRightAngle() const {
        return rightAngle;
    }

    double getMinR() const {
        return minR;
    }

    double getMaxR() const {
        return maxR;
    }

    index getID() const {
        return ID;
    }

    // Assign post-order IDs to this subtree, starting from nextID;
    // returns the first ID not used by this subtree.
    index indexSubtree(index nextID) {
        index result = nextID;
        assert(children.size() == 4 || children.size() == 0);
        for (int i = 0; i < children.size(); i++) {
            result = children[i].indexSubtree(result);
        }
        this->ID = result;
        return result+1;
    }

    // ID of the leaf cell containing (phi, r), or -1 if this subtree is not
    // responsible for those coordinates.
    index getCellID(double phi, double r) const {
        if (!responsible(phi, r)) return -1;
        if (isLeaf) return getID();
        else {
            for (int i = 0; i < 4; i++) {
                index childresult = children[i].getCellID(phi, r);
                if (childresult >= 0) return childresult;
            }
            assert(false); //if responsible
            return -1;
        }
    }

    // Largest ID assigned anywhere in this subtree (including this node).
    index getMaxIDInSubtree() const {
        if (isLeaf) return getID();
        else {
            index result = -1;
            for (int i = 0; i < 4; i++) {
                result = std::max(children[i].getMaxIDInSubtree(), result);
            }
            return std::max(result, getID());
        }
    }

    // Overwrite the stored content with consecutive indices starting at
    // offset, in depth-first leaf order; returns the next unused index.
    // Leaf renumbering is spawned as OpenMP tasks.
    count reindex(count offset) {
        if (isLeaf)
        {
            #pragma omp task
            {
                index p = offset;
                std::generate(content.begin(), content.end(), [&p](){return p++;});
            }
            offset += size();
        } else {
            for (int i = 0; i < 4; i++) {
                offset = children[i].reindex(offset);
            }
        }
        return offset;
    }
};
}

#endif /* QUADNODE_H_ */
GB_unaryop__identity_uint64_int64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__identity_uint64_int64
// op(A') function: GB_tran__identity_uint64_int64

// C type: uint64_t
// A type: int64_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (IDENTITY: output equals input after the cast)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Sets Cx [p] = (uint64_t) Ax [p] for all p in [0, anz), in parallel.
// Cx and Ax must both have at least anz entries.  Returns GrB_NO_VALUE when
// this kernel is compiled out via GB_DISABLE (caller falls back to the
// generic implementation), GrB_SUCCESS otherwise.
GrB_Info GB_unop__identity_uint64_int64
(
    uint64_t *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop is the shared template GB_unaryop_transpose.c,
// specialized by the GB_* macros defined above; this wrapper only selects
// phase 2 of the two-phase transpose.
GrB_Info GB_tran__identity_uint64_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
sequence_distance.c
/* Kalign - a multiple sequence alignment program

   Copyright 2006, 2019 Timo Lassmann

   This file is part of kalign.

   Kalign is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <https://www.gnu.org/licenses/>.
*/

#include <xmmintrin.h>

#include "sequence_distance.h"

#include "alphabet.h"

/* #include "alignment.h" */
#include "align_io.h"
#include "misc.h"
#include "bpm.h"

#define NODESIZE 16

/* small hash implementation: a bucket is a singly linked chain of nodes,
   each holding up to NODESIZE sequence positions for one k-mer value */
struct bignode{
        struct bignode *next;
        unsigned int pos[NODESIZE];
        unsigned int num;               /* number of valid entries in pos[] */
};

struct bignode* big_insert_hash(struct bignode *n,const unsigned int pos);
void big_remove_nodes(struct bignode *n);
void big_print_nodes(struct bignode *n);

float dna_distance_calculation(struct bignode* hash[],const uint8_t * p,const int seqlen,int diagonals,float mode);
float protein_wu_distance_calculation(struct bignode* hash[],const uint8_t* seq,const int seqlen,const int diagonals,const float mode);

/* Estimate a distance matrix over the sequences of `msa`.
 *
 * pair != 0 : returns a symmetric num_samples x num_samples matrix of
 *             distances among the sampled sequences (allocated via galloc).
 * pair == 0 : returns a numseq x num_samples matrix; each row is allocated
 *             with _mm_malloc, 32-byte aligned, padded to a multiple of 8
 *             floats (presumably for SIMD consumers — confirm with callers).
 * Returns NULL on allocation failure (via the RUN/MMALLOC error macros).
 */
float** d_estimation(struct msa* msa, int* samples, int num_samples,int pair)
{
        float** dm = NULL;
        uint8_t* seq_a;
        uint8_t* seq_b;
        float dist;
        int len_a;
        int len_b;
        int i,j;
/* NOTE(review): this uses `#if HAVE_AVX2` while calc_distance below uses
   `#ifdef HAVE_AVX2` — confirm the build defines the macro consistently. */
#if HAVE_AVX2
        set_broadcast_mask();
#endif
        if(pair){
                RUN(galloc(&dm,num_samples,num_samples));
                for(i = 0; i < num_samples;i++){
                        seq_a = msa->sequences[samples[i]]->s;// aln->s[samples[i]];
                        len_a = msa->sequences[samples[i]]->len;//aln->sl[samples[i]];
                        // fprintf(stdout,"seq_a=%d,len_a=%d\n",*(msa->sequences[samples[i]]->s),len_a);
                        for(j = 0;j < num_samples;j++){
                                //fprintf(stdout, "Working on %d %d\n", i,j);
                                seq_b = msa->sequences[samples[j]]->s; //aln->s[ samples[j]];
                                len_b = msa->sequences[samples[j]]->len;//aln->sl[selection[j]];
                                /*dm[i][j] = MACRO_MIN(len_a, len_b) - MACRO_MIN( bpm_256(seq_a, seq_b, len_a, len_b), bpm_256(seq_b, seq_a, len_b, len_a) );*/
                                dist = calc_distance(seq_a, seq_b, len_a, len_b,msa->L);
                                //dist = dist / (float) MACRO_MIN(len_a, len_b);
                                /* the matrix is symmetric; fill both halves */
                                dm[i][j] = dist;// + (float)( i * num_samples + j) / (float) ( num_samples * num_samples);
                                dm[j][i] = dm[i][j];
                                //fprintf(stdout,"%f ", dm[i][j]);
                        }
                        //fprintf(stdout,"\n");
                }
        }else{
                int a;
                int numseq = msa->numseq;
                MMALLOC(dm, sizeof(float*)* numseq);
                //fprintf(stdout,"MASK: %lx\n", mask);
                /* round row length up to the next multiple of 8 floats */
                a = num_samples / 8;
                if( num_samples%8){
                        a++;
                }
                a = a << 3;
                for(i = 0; i < numseq;i++){
                        dm[i] = NULL;
                        dm[i] = _mm_malloc(sizeof(float) * a,32);
                        for(j = 0; j < a;j++){
                                dm[i][j] = 0.0F;
                        }
                }
                // fprintf(stdout,"msa->L:%d\n",msa->L);
                struct msa_seq** s = msa->sequences;
#ifdef HAVE_OPENMP
#pragma omp parallel for shared(dm, s) private(i, j) collapse(2) schedule(static)
#endif
                // fprintf(stdout,"%d,%d\n",numseq,num_samples);
                for(i = 0; i < numseq;i++){
                        for(j = 0;j < num_samples;j++){
                                // fprintf(stdout,"%d,%d\n",i,j);
                                uint8_t* s1;
                                uint8_t* s2;
                                int l1;
                                int l2;
                                s1 = s[i]->s;
                                l1 = s[i]->len;
                                s2 = s[samples[j]]->s;
                                l2 = s[samples[j]]->len;
                                // fprintf(stdout,"%d,%d,%d,%d,%d\n",*s1,*s2,l1,l2,msa->L);
                                // TODO: the problem appears to be in calc_distance()
                                // fprintf(stdout,"%f\n",dm[i][j]);
                                dm[i][j] = calc_distance(s1, s2, l1, l2, msa->L);
                                // fprintf(stdout,"done:%d,%d,%d,%d,%d\n",*s1,*s2,l1,l2,msa->L);
                                //dm[i][j] += (float)MACRO_MIN(l1, l2) / (float)MACRO_MAX(l1, l2);
                                //dm[i][j] = dm[i][j] / (float) MACRO_MIN(l1, l2);
                                //dm[i][j] = dist;
                        }
                }
                /* for(i = 0; i < numseq;i++){ */
                /*         seq_a = msa->sequences[i]->s;// aln->s[i]; */
                /*         len_a = msa->sequences[i]->len;// aln->sl[i]; */
                /*         for(j = 0;j < num_samples;j++){ */
                /*                 seq_b = msa->sequences[samples[j]]->s;// aln->s[ seeds[j]]; */
                /*                 len_b = msa->sequences[samples[j]]->len;// aln->sl[seeds[j]]; */
                /*                 dist = calc_distance(seq_a, seq_b, len_a, len_b,msa->L); */
                /*                 dm[i][j] = dist; */
                /*         } */
                /* } */
        }
        return dm;
ERROR:
        return NULL;
}

/* Distance between two sequences.
 *
 * With AVX2 the banded bit-parallel edit distance (bpm_256) is used, always
 * called with the longer sequence first.  Without AVX2, seq_a's k-mers
 * (gapped 2-mers for protein, 5 gapped 5-mer variants for DNA) are hashed,
 * then scanned against seq_b counting matches per diagonal; diagonals whose
 * count exceeds a fixed threshold (58.9 protein / 61.08 DNA — empirically
 * tuned constants, source unknown) contribute to the score.
 * `L` is the alphabet type; L > defDNA means protein. */
float calc_distance(uint8_t* seq_a, uint8_t* seq_b, int len_a,int len_b, int L)
{
#ifdef HAVE_AVX2
        uint8_t dist;
        if(len_a > len_b){
                dist = bpm_256(seq_a, seq_b, len_a, len_b);
        }else{
                dist = bpm_256(seq_b, seq_a, len_b, len_a);
        }
        return (float)dist;
#else
        struct bignode* hash[1024];
        int i;
        float dist;
        unsigned int hv;
        for (i = 0;i < 1024;i++){
                hash[i] = 0;
        }
        /* Protein sequence */
        if( L > defDNA){
                /* hash the two gapped 2-mers (i,i+1) and (i,i+2), keyed by
                   (c0 << 5) | c1, storing start position i */
                for (i = len_a-2;i--;){
                        hv = (seq_a[i] << 5) + seq_a[i+1];
                        hash[hv] = big_insert_hash(hash[hv],i);
                        hv = (seq_a[i] << 5) + seq_a[i+2];
                        hash[hv] = big_insert_hash(hash[hv],i);
                }
                dist = protein_wu_distance_calculation(hash,seq_b,len_b,len_a+len_b,58.9);
        }else{
                /* hash all 5 ways of picking 5 of the 6 bases starting at i
                   (2 bits per base, 10-bit key) */
                for (i = len_a-5;i--;){
                        hv = ((seq_a[i]&3)<<8) + ((seq_a[i+1]&3)<<6) + ((seq_a[i+2]&3)<<4) + ((seq_a[i+3]&3)<<2) + (seq_a[i+4]&3);//ABCDE
                        hash[hv] = big_insert_hash(hash[hv],i);
                        hv = ((seq_a[i]&3)<<8) + ((seq_a[i+1]&3)<<6) + ((seq_a[i+2]&3)<<4) + ((seq_a[i+3]&3)<<2) + (seq_a[i+5]&3);//ABCDF
                        hash[hv] = big_insert_hash(hash[hv],i);
                        hv = ((seq_a[i]&3)<<8) + ((seq_a[i+1]&3)<<6) + ((seq_a[i+2]&3)<<4) + ((seq_a[i+4]&3)<<2) + (seq_a[i+5]&3);//ABCEF
                        hash[hv] = big_insert_hash(hash[hv],i);
                        hv = ((seq_a[i]&3)<<8) + ((seq_a[i+1]&3)<<6) + ((seq_a[i+3]&3)<<4) + ((seq_a[i+4]&3)<<2) + (seq_a[i+5]&3);//ABDEF
                        hash[hv] = big_insert_hash(hash[hv],i);
                        hv = ((seq_a[i]&3)<<8) + ((seq_a[i+2]&3)<<6) + ((seq_a[i+3]&3)<<4) + ((seq_a[i+4]&3)<<2) + (seq_a[i+5]&3);//ACDEF
                        hash[hv] = big_insert_hash(hash[hv],i);
                }
                dist = dna_distance_calculation(hash,seq_b,len_b,len_a+len_b, 61.08);
        }
        /* free all chains before the stack array goes out of scope */
        for (i = 1024;i--;){
                if (hash[i]){
                        big_remove_nodes(hash[i]);
                        hash[i] = 0;
                }
        }
        return dist;
#endif
}

/* Count gapped 2-mer matches between `seq` and the pre-hashed sequence,
 * bucketed per alignment diagonal, and sum the counts of every diagonal
 * exceeding `mode`.
 *
 * The pointer trick: `d++` at the end of each outer iteration shifts the
 * window, so for seq position i the increment d[tmp[j]] actually lands on
 * diagonal tmp[j] + i; `d -= (seqlen-2)` restores the base pointer before
 * the final scan and free(). Do not "simplify" this.
 * NOTE(review): malloc result is unchecked — confirm OOM policy. */
float protein_wu_distance_calculation(struct bignode* hash[],const uint8_t* seq,const int seqlen,const int diagonals,const float mode)
{
        struct bignode* node_p;
        unsigned int* d = NULL;
        unsigned int* tmp = NULL;
        float out = 0.0;
        register int i,j;
        register int c;
        register int num;
        register unsigned int hv;

        d = malloc(sizeof(unsigned int)*diagonals);
        //for (i = diagonals;i--;){
        for (i = 0;i < diagonals;i++){
                d[i] = 0;
        }
        for (i = seqlen-2;i--;){
                //for(i = 0; i < seqlen-2;i++){
                /*hv = (seq[i+1] << 5) + seq[i+2];
                node_p = hash[hv];
                while(node_p){
                        tmp = node_p->pos;
                        for(j = 0;j < node_p->num;j++){
                                d[tmp[j]]++;
                        }
                        node_p = node_p->next;
                }*/
                /* contiguous 2-mer: credit the match position and its successor */
                hv = (seq[i] << 5) + seq[i+1];
                //printf("3:%d\n",hv);
                node_p = hash[hv];
                while(node_p){
                        tmp = node_p->pos;
                        num = node_p->num;
                        for(j = 0;j < num;j++){
                                c = tmp[j];
                                d[c]++;
                                c++;
                                d[c]++;
                        }
                        node_p = node_p->next;
                }
                /* gapped 2-mer (i, i+2) */
                hv = (seq[i] << 5) + seq[i+2];
                node_p = hash[hv];
                while(node_p){
                        tmp = node_p->pos;
                        num = node_p->num;
                        for(j = 0;j < num;j++){
                                c = tmp[j];
                                d[c]++;
                        }
                        node_p = node_p->next;
                }
                d++;    /* slide the diagonal window by one */
        }
        //exit(0);
        d -= (seqlen-2);        /* restore base pointer */
        //unsigned int max = 0.0;
        for (i = diagonals;i--;){
                //      if(d[i] > max){
                //      max = d[i];
                //}
                //d[i] /= minlen;
                //fprintf(stderr,"%d ",d[i]);
                if(d[i] > mode){
                        out += d[i];
                        //      printf("%f %d\n",d[i]/ minlen,d[i]);
                }
        }
        free(d);
        //return out;
        return out;
}

/* DNA counterpart of protein_wu_distance_calculation: counts matches of the
 * 5 gapped 5-mer variants per diagonal (same sliding-pointer trick via
 * d++ / d -= (seqlen-5)) and sums diagonals whose count exceeds `mode`.
 * NOTE(review): malloc result is unchecked — confirm OOM policy. */
float dna_distance_calculation(struct bignode* hash[],const uint8_t * p,const int seqlen,int diagonals,float mode)
{
        struct bignode* node_p;
        float out = 0.0;
        unsigned int* tmp = NULL;
        unsigned int* d = NULL;
        int i,j;
        unsigned int hv;

        d = malloc(sizeof(int)*diagonals);
        for (i = 0;i < diagonals;i++){
                d[i] = 0;
        }
        for (i = seqlen-5;i--;){
                hv = ((p[i]&3)<<8) + ((p[i+1]&3)<<6) + ((p[i+2]&3)<<4) + ((p[i+3]&3)<<2) + (p[i+4]&3);//ABCDE
                if (hash[hv]){
                        node_p = hash[hv];
                        while(node_p){
                                tmp = node_p->pos;
                                for(j = 0;j < (int) node_p->num;j++){
                                        d[tmp[j]]++;
                                }
                                node_p = node_p->next;
                        }
                }
                hv = ((p[i]&3)<<8) + ((p[i+1]&3)<<6) + ((p[i+2]&3)<<4) + ((p[i+3]&3)<<2) + (p[i+5]&3);//ABCDF
                if (hash[hv]){
                        node_p = hash[hv];
                        while(node_p){
                                tmp = node_p->pos;
                                for(j = 0;j < (int)node_p->num;j++){
                                        d[tmp[j]]++;
                                }
                                node_p = node_p->next;
                        }
                }
                hv = ((p[i]&3)<<8) + ((p[i+1]&3)<<6) + ((p[i+2]&3)<<4) + ((p[i+4]&3)<<2) + (p[i+5]&3);//ABCEF
                if (hash[hv]){
                        node_p = hash[hv];
                        while(node_p){
                                tmp = node_p->pos;
                                for(j = 0;j < (int)node_p->num;j++){
                                        d[tmp[j]]++;
                                }
                                node_p = node_p->next;
                        }
                }
                hv = ((p[i]&3)<<8) + ((p[i+1]&3)<<6) + ((p[i+3]&3)<<4) + ((p[i+4]&3)<<2) + (p[i+5]&3);//ABDEF
                if (hash[hv]){
                        node_p = hash[hv];
                        while(node_p){
                                tmp = node_p->pos;
                                for(j = 0;j < (int)node_p->num;j++){
                                        d[tmp[j]]++;
                                }
                                node_p = node_p->next;
                        }
                }
                hv = ((p[i]&3)<<8) + ((p[i+2]&3)<<6) + ((p[i+3]&3)<<4) + ((p[i+4]&3)<<2) + (p[i+5]&3);//ACDEF
                if (hash[hv]){
                        node_p = hash[hv];
                        while(node_p){
                                tmp = node_p->pos;
                                for(j = 0;j < (int)node_p->num;j++){
                                        d[tmp[j]]++;
                                }
                                node_p = node_p->next;
                        }
                }
                d++;    /* slide the diagonal window by one */
        }
        //exit(0);
        d -= (seqlen-5);        /* restore base pointer */
        for (i = diagonals;i--;){
                //d[i] /= minlen;
                //printf("%d ",d[i]);
                if(d[i] > mode){
                        //fprintf(stderr,"%f %d\n",d[i]/ minlen,d[i]);
                        out += d[i];
                }
        }
        free(d);
        return out;
}

/* Prepend `pos` to bucket chain `n`.  If the head node has room the position
 * is appended in place and `n` is returned; otherwise a new head node is
 * allocated and returned.  Returns NULL on allocation failure (MMALLOC
 * jumps to ERROR).  Caller must store the returned pointer back. */
struct bignode* big_insert_hash(struct bignode *n,const unsigned int pos)
{
        struct bignode* p = NULL;
        if(n){
                if(n->num < NODESIZE){
                        n->pos[n->num] = pos;
                        n->num++;
                        return n;
                }else{
                        MMALLOC(p, sizeof(struct bignode));
                        p->pos[0] = pos;
                        p->num = 1;
                        p->next = n;
                }
        }else{
                MMALLOC(p, sizeof(struct bignode));
                p->pos[0] = pos;
                p->num = 1;
                p->next = n;
        }
        return p;
ERROR:
        return NULL;
}

/* Free an entire bucket chain. */
void big_remove_nodes(struct bignode *n)
{
        struct bignode* p = NULL;
        while(n){
                p = n;
                n = n->next;
                MFREE(p);
        }
}

/* Debug helper: dump all stored positions of a chain to stderr. */
void big_print_nodes(struct bignode *n)
{
        int i;
        while(n){
                for (i = 0; i < (int)n->num;i++){
                        fprintf(stderr,"%d ",n->pos[i]);
                }
                n = n->next;
        }
}
ast-dump-openmp-simd.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp simd for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp simd for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp simd collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPSimdDirective {{.*}} <line:4:1, col:17> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 
'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-simd.c:4:1) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPSimdDirective {{.*}} <line:10:1, col:17> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, 
line:13:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPSimdDirective {{.*}} <line:17:1, col:29> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:18, col:28> // CHECK-NEXT: | | `-ConstantExpr 
{{.*}} <col:27> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:27> 'int' 1 // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 
'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPSimdDirective {{.*}} <line:24:1, col:29> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:18, col:28> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:27> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:27> 'int' 2 // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | 
| | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // 
CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPSimdDirective {{.*}} <line:31:1, col:29> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:18, col:28> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:27> 'int' // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:27> 'int' 2 // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 
'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} 
<line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
mc.c
/** * mc.c * Authors: Yizhao Gao <yizhaotsccsj@gmail.com> * Date: {08/01/2017} */ #include <stdio.h> #include <stdlib.h> #include <random> #include <omp.h> #include "scan.h" using namespace std; void randomLabel(int * indAll, int casCount, int allCount) { static std::random_device rd; static std::mt19937 rng(rd()); static std::uniform_int_distribution<int> uni(0, allCount - 1); int casID; for(int i = 0; i < allCount; i++) indAll[i] = 0; for(int i = 0; i < casCount; i++) { casID = uni(rng); while(indAll[casID] == 1) casID = uni(rng); indAll[casID] = 1; } return; } int * monteCarlo(double * x, double * y, int * locEnding, int locCount, int casCount, int allCount, double wSize, int wCount, int highLow, double * clusterLL, int nClusters, int nSim) { int * nExtreme; if(NULL == (nExtreme = (int *) malloc (nClusters * sizeof(int)))) { printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__); exit(1); } for(int i = 0; i < nClusters; i++) nExtreme[i] = 0; int * indAll; int * simCass; int * simCons; if(NULL == (indAll = (int *) malloc (allCount * sizeof(int)))) { printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__); exit(1); } if(NULL == (simCass = (int *) malloc (locCount * sizeof(int)))) { printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__); exit(1); } if(NULL == (simCons = (int *) malloc (locCount * sizeof(int)))) { printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__); exit(1); } int indID, simCas, simCon; int * simCasInW; int * simConInW; double * simll; if(NULL == (simCasInW = (int *) malloc (locCount * wCount * sizeof(int)))) { printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__); exit(1); } if(NULL == (simConInW = (int *) malloc (locCount * wCount * sizeof(int)))) { printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__); exit(1); } if(NULL == (simll = (double *) malloc (locCount * wCount * sizeof(double)))) { printf("ERROR: Out of memory at 
line %d in file %s\n", __LINE__, __FILE__); exit(1); } double simMaxLL; for(int i = 0; i < nSim; i++) { randomLabel(indAll, casCount, allCount); indID = 0; for(int j = 0; j < locCount; j++) { simCas = 0; simCon = 0; for(; indID < locEnding[j]; indID ++) { if(indAll[indID] == 1) { simCas ++; } else { simCon ++; } } simCass[j] = simCas; simCons[j] = simCon; } getCCCount(x, y, simCass, simCons, locCount, wSize, wCount, simCasInW, simConInW); loglikelihood(simll, simCasInW, simConInW, locCount * wCount, casCount, allCount - casCount, highLow); simMaxLL = 1; int k = 0; for(; k < locCount * wCount; k++) { if(simll[k] < 0) { simMaxLL = simll[k]; k++; break; } } for(; k < locCount * wCount; k++) { if(simll[k] < 0 && simll[k] > simMaxLL) { simMaxLL = simll[k]; } } if(simMaxLL < 0) { for(int j = 0; j < nClusters; j++) { if(simMaxLL > clusterLL[j]) { nExtreme[j] ++; } } } } free(simCasInW); free(simConInW); free(simll); free(indAll); free(simCass); free(simCons); return nExtreme; } int * monteCarloOld(double * x, double * y, int * locEnding, int locCount, int casCount, int allCount, int * clusterCase, int * centerID, double * cRadius, bool * highCluster, int nClusters, int nSim) { int * nExtreme; if(NULL == (nExtreme = (int *) malloc (nClusters * sizeof(int)))) { printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__); exit(1); } for(int i = 0; i < nClusters; i++) nExtreme[i] = 0; int * indAll; int * simCass; if(NULL == (indAll = (int *) malloc (allCount * sizeof(int)))) { printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__); exit(1); } if(NULL == (simCass = (int *) malloc (locCount * sizeof(int)))) { printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__); exit(1); } int indID, simCas; for(int i = 0; i < nSim; i++) { randomLabel(indAll, casCount, allCount); indID = 0; for(int j = 0; j < locCount; j++) { simCas = 0; for(; indID < locEnding[j]; indID ++) { if(indAll[indID] == 1) simCas ++; } simCass[j] = simCas; } 
#pragma omp parallel for for(int j = 0; j < nClusters; j++) { double xC = x[centerID[j]]; double yC = y[centerID[j]]; double rad2 = cRadius[j] * cRadius[j]; int simCasInc = 0; for(int k = 0; k < locCount; k++) { if((x[k] - xC) * (x[k] - xC) + (y[k] - yC) * (y[k] - yC) <= rad2) { simCasInc += simCass[k]; } } if(highCluster[j] && simCasInc >= clusterCase[j]) nExtreme[j] ++; else if(!highCluster[j] && simCasInc <= clusterCase[j]) nExtreme[j] ++; } } free(indAll); free(simCass); return nExtreme; }
/* attribute.c */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/client.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colormap-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/deprecate.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/effect.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/identify.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/magick.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/segment.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/transform.h" #include "magick/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageBoundingBox() returns the bounding box of an image canvas. 
% % The format of the GetImageBoundingBox method is: % % RectangleInfo GetImageBoundingBox(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o bounds: Method GetImageBoundingBox returns the bounding box of an % image canvas. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ typedef struct _EdgeInfo { double left, right, top, bottom; } EdgeInfo; static double GetEdgeBackgroundFactor(const Image *image, const CacheView *image_view,const GravityType gravity,const size_t width, const size_t height,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { CacheView *edge_view; const char *artifact; double factor; Image *edge_image; MagickPixelPacket background, pixel; RectangleInfo edge_geometry; const PixelPacket *p; ssize_t y; /* Determine the percent of image background for this edge. */ switch (gravity) { case NorthWestGravity: case NorthGravity: default: { p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); break; } case NorthEastGravity: case EastGravity: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); break; } case SouthEastGravity: case SouthGravity: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1, (ssize_t) image->rows-1,1,1,exception); break; } case SouthWestGravity: case WestGravity: { p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); break; } } GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,p,(IndexPacket *) NULL,&background); artifact=GetImageArtifact(image,"background"); if (artifact != (const char *) NULL) (void) QueryMagickColor(artifact,&background,exception); artifact=GetImageArtifact(image,"trim:background-color"); if (artifact != (const char *) NULL) (void) QueryMagickColor(artifact,&background,exception); edge_geometry.width=width; edge_geometry.height=height; edge_geometry.x=x_offset; edge_geometry.y=y_offset; 
GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry); edge_image=CropImage(image,&edge_geometry,exception); if (edge_image == (Image *) NULL) return(0.0); factor=0.0; GetMagickPixelPacket(edge_image,&pixel); edge_view=AcquireVirtualCacheView(edge_image,exception); for (y=0; y < (ssize_t) edge_image->rows; y++) { ssize_t x; p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) edge_image->columns; x++) { SetMagickPixelPacket(edge_image,p,(IndexPacket *) NULL,&pixel); if (IsMagickColorSimilar(&pixel,&background) == MagickFalse) factor++; p++; } } factor/=((double) edge_image->columns*edge_image->rows); edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); return(factor); } static inline double GetMinEdgeBackgroundFactor(const EdgeInfo *edge) { double factor; factor=MagickMin(MagickMin(MagickMin(edge->left,edge->right),edge->top), edge->bottom); return(factor); } static RectangleInfo GetEdgeBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *edge_view; const char *artifact; double background_factor, percent_background; EdgeInfo edge, vertex; Image *edge_image; RectangleInfo bounds; /* Get the image bounding box. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); SetGeometry(image,&bounds); edge_image=CloneImage(image,0,0,MagickTrue,exception); if (edge_image == (Image *) NULL) return(bounds); (void) ParseAbsoluteGeometry("0x0+0+0",&edge_image->page); memset(&vertex,0,sizeof(vertex)); edge_view=AcquireVirtualCacheView(edge_image,exception); edge.left=GetEdgeBackgroundFactor(edge_image,edge_view,WestGravity, 1,0,0,0,exception); edge.right=GetEdgeBackgroundFactor(edge_image,edge_view,EastGravity, 1,0,0,0,exception); edge.top=GetEdgeBackgroundFactor(edge_image,edge_view,NorthGravity, 0,1,0,0,exception); edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view,SouthGravity, 0,1,0,0,exception); percent_background=1.0; artifact=GetImageArtifact(edge_image,"trim:percent-background"); if (artifact != (const char *) NULL) percent_background=StringToDouble(artifact,(char **) NULL)/100.0; percent_background=MagickMin(MagickMax(1.0-percent_background,MagickEpsilon), 1.0); background_factor=GetMinEdgeBackgroundFactor(&edge); for ( ; background_factor < percent_background; background_factor=GetMinEdgeBackgroundFactor(&edge)) { if ((bounds.width == 0) || (bounds.height == 0)) break; if (fabs(edge.left-background_factor) < MagickEpsilon) { /* Trim left edge. */ vertex.left++; bounds.width--; edge.left=GetEdgeBackgroundFactor(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundFactor(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } if (fabs(edge.right-background_factor) < MagickEpsilon) { /* Trim right edge. 
*/ vertex.right++; bounds.width--; edge.right=GetEdgeBackgroundFactor(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundFactor(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } if (fabs(edge.top-background_factor) < MagickEpsilon) { /* Trim top edge. */ vertex.top++; bounds.height--; edge.left=GetEdgeBackgroundFactor(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.right=GetEdgeBackgroundFactor(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundFactor(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); continue; } if (fabs(edge.bottom-background_factor) < MagickEpsilon) { /* Trim bottom edge. 
*/ vertex.bottom++; bounds.height--; edge.left=GetEdgeBackgroundFactor(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.right=GetEdgeBackgroundFactor(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } } edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); bounds.x=(ssize_t) vertex.left; bounds.y=(ssize_t) vertex.top; if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); return(bounds); } MagickExport RectangleInfo GetImageBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *image_view; const char *artifact; MagickBooleanType status; MagickPixelPacket target[4], zero; RectangleInfo bounds; const PixelPacket *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); artifact=GetImageArtifact(image,"trim:percent-background"); if (artifact != (const char *) NULL) return(GetEdgeBoundingBox(image,exception)); bounds.width=0; bounds.height=0; bounds.x=(ssize_t) image->columns; bounds.y=(ssize_t) image->rows; GetMagickPixelPacket(image,&target[0]); image_view=AcquireVirtualCacheView(image,exception); p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); if (p == (const PixelPacket *) NULL) { image_view=DestroyCacheView(image_view); return(bounds); } SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view), &target[0]); GetMagickPixelPacket(image,&target[1]); p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); if (p != (const PixelPacket *) 
NULL) SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view), &target[1]); GetMagickPixelPacket(image,&target[2]); p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); if (p != (const PixelPacket *) NULL) SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view), &target[2]); GetMagickPixelPacket(image,&target[3]); p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1, (ssize_t) image->rows-1,1,1,exception); if (p != (const PixelPacket *) NULL) SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view), &target[3]); status=MagickTrue; GetMagickPixelPacket(image,&zero); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; RectangleInfo bounding_box; const IndexPacket *magick_restrict indexes; const PixelPacket *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif bounding_box=bounds; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,p,indexes+x,&pixel); if ((x < bounding_box.x) && (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse)) bounding_box.x=x; if ((x > (ssize_t) bounding_box.width) && (IsMagickColorSimilar(&pixel,&target[1]) == MagickFalse)) bounding_box.width=(size_t) x; if ((y < bounding_box.y) && (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse)) bounding_box.y=y; if ((y > (ssize_t) bounding_box.height) && (IsMagickColorSimilar(&pixel,&target[2]) == MagickFalse)) bounding_box.height=(size_t) y; if ((x < (ssize_t) bounding_box.width) && (y > (ssize_t) 
bounding_box.height) && (IsMagickColorSimilar(&pixel,&target[3]) == MagickFalse)) { bounding_box.width=(size_t) x; bounding_box.height=(size_t) y; } p++; } #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif { if (bounding_box.x < bounds.x) bounds.x=bounding_box.x; if (bounding_box.y < bounds.y) bounds.y=bounding_box.y; if (bounding_box.width > bounds.width) bounds.width=bounding_box.width; if (bounding_box.height > bounds.height) bounds.height=bounding_box.height; } } image_view=DestroyCacheView(image_view); if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); else { bounds.width-=(bounds.x-1); bounds.height-=(bounds.y-1); } return(bounds); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelDepth() returns the depth of a particular image channel. % % The format of the GetImageChannelDepth method is: % % size_t GetImageDepth(const Image *image,ExceptionInfo *exception) % size_t GetImageChannelDepth(const Image *image, % const ChannelType channel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o exception: return any errors or warnings in this structure. % */ MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception) { return(GetImageChannelDepth(image,CompositeChannels,exception)); } MagickExport size_t GetImageChannelDepth(const Image *image, const ChannelType channel,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t i; size_t *current_depth, depth, number_threads; ssize_t y; /* Compute image depth. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); number_threads=(size_t) GetMagickResourceLimit(ThreadResource); current_depth=(size_t *) AcquireQuantumMemory(number_threads, sizeof(*current_depth)); if (current_depth == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); status=MagickTrue; for (i=0; i < (ssize_t) number_threads; i++) current_depth[i]=1; if ((image->storage_class == PseudoClass) && (image->matte == MagickFalse)) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->colors,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { const int id = GetOpenMPThreadId(); while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickBooleanType atDepth; QuantumAny range; atDepth=MagickTrue; range=GetQuantumRange(current_depth[id]); if ((channel & RedChannel) != 0) if (IsPixelAtDepth(image->colormap[i].red,range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && ((channel & GreenChannel) != 0)) if (IsPixelAtDepth(image->colormap[i].green,range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && ((channel & BlueChannel) != 0)) if (IsPixelAtDepth(image->colormap[i].blue,range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse)) break; current_depth[id]++; } } depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } image_view=AcquireVirtualCacheView(image,exception); #if !defined(MAGICKCORE_HDRI_SUPPORT) DisableMSCWarning(4127) if (1UL*QuantumRange <= MaxMap) RestoreMSCWarning { size_t *depth_map; /* Scale pixels to desired (optimized with depth map). 
*/ depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map)); if (depth_map == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i <= (ssize_t) MaxMap; i++) { unsigned int depth; for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++) { Quantum pixel; QuantumAny range; range=GetQuantumRange(depth); pixel=(Quantum) i; if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range)) break; } depth_map[i]=depth; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); const IndexPacket *magick_restrict indexes; const PixelPacket *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { Quantum pixel; if ((channel & RedChannel) != 0) { pixel=GetPixelRed(p); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } if ((channel & GreenChannel) != 0) { pixel=GetPixelGreen(p); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } if ((channel & BlueChannel) != 0) { pixel=GetPixelBlue(p); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { pixel=GetPixelOpacity(p); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { pixel=GetPixelIndex(indexes+x); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) 
current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } p++; } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; depth_map=(size_t *) RelinquishMagickMemory(depth_map); current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } #endif #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); const IndexPacket *magick_restrict indexes; const PixelPacket *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickBooleanType atDepth; QuantumAny range; atDepth=MagickTrue; range=GetQuantumRange(current_depth[id]); if ((atDepth != MagickFalse) && ((channel & RedChannel) != 0)) if (IsPixelAtDepth(GetPixelRed(p),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && ((channel & GreenChannel) != 0)) if (IsPixelAtDepth(GetPixelGreen(p),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && ((channel & BlueChannel) != 0)) if (IsPixelAtDepth(GetPixelBlue(p),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && ((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) if (IsPixelAtDepth(GetPixelOpacity(p),range) == MagickFalse) atDepth=MagickTrue; if ((atDepth != MagickFalse) && ((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) if (IsPixelAtDepth(GetPixelIndex(indexes+x),range) == MagickFalse) atDepth=MagickFalse; if 
((atDepth != MagickFalse)) break; current_depth[id]++; } p++; } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e Q u a n t u m D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageQuantumDepth() returns the depth of the image rounded to a legal % quantum depth: 8, 16, or 32. % % The format of the GetImageQuantumDepth method is: % % size_t GetImageQuantumDepth(const Image *image, % const MagickBooleanType constrain) % % A description of each parameter follows: % % o image: the image. % % o constrain: A value other than MagickFalse, constrains the depth to % a maximum of MAGICKCORE_QUANTUM_DEPTH. 
%
*/
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  size_t
    depth;

  ssize_t
    i;

  /*
    Round the image depth up to the nearest legal quantum depth
    (8, 16, 32, or 64); depths above 64 pass through unchanged.
  */
  static const size_t
    legal_depths[] = { 8, 16, 32, 64 };

  depth=image->depth;
  for (i=0; i < (ssize_t) (sizeof(legal_depths)/sizeof(legal_depths[0])); i++)
    if (depth <= legal_depths[i])
      {
        depth=legal_depths[i];
        break;
      }
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t   I m a g e   T y p e                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageType() returns the potential type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  To ensure the image type matches its potential, use SetImageType():
%
%      (void) SetImageType(image,GetImageType(image));
%
%  The format of the GetImageType method is:
%
%      ImageType GetImageType(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType GetImageType(const Image *image,ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    CMYK images are color separations regardless of pixel content.
  */
  if (image->colorspace == CMYKColorspace)
    {
      if (image->matte == MagickFalse)
        return(ColorSeparationType);
      return(ColorSeparationMatteType);
    }
  /*
    Classify by successively weaker predicates: bilevel before gray,
    gray before palette, otherwise truecolor.  Order matters.
  */
  if (IsMonochromeImage(image,exception) != MagickFalse)
    return(BilevelType);
  if (IsGrayImage(image,exception) != MagickFalse)
    {
      if (image->matte != MagickFalse)
        return(GrayscaleMatteType);
      return(GrayscaleType);
    }
  if (IsPaletteImage(image,exception) != MagickFalse)
    {
      if (image->matte != MagickFalse)
        return(PaletteMatteType);
      return(PaletteType);
    }
  if (image->matte != MagickFalse)
    return(TrueColorMatteType);
  return(TrueColorType);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I d e n t i f y   I m a g e   G r a y                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageGray() returns grayscale if all the pixels in the image have
%  the same red, green, and blue intensities, and bi-level if the intensity is
%  either 0 or QuantumRange. Otherwise undefined is returned.
%
%  The format of the IdentifyImageGray method is:
%
%      ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageGray(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  const PixelPacket
    *p;

  ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Trust an already-classified gray/bilevel type without scanning pixels.
  */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleMatteType))
    return(image->type);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(UndefinedType);
  /*
    Start optimistic (bilevel); demote to grayscale on a non-0/QuantumRange
    gray pixel, and to undefined on any non-gray pixel.
  */
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelGray(p) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      if ((type == BilevelType) && (IsPixelMonochrome(p) == MagickFalse))
        type=GrayscaleType;
      p++;
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((type == GrayscaleType) && (image->matte != MagickFalse))
    type=GrayscaleMatteType;
  return(type);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I d e n t i f y   I m a g e   M o n o c h r o m e                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
%  have the same red, green, and blue intensities and the intensity is either
%  0 or QuantumRange.
%
%  The format of the IdentifyImageMonochrome method is:
%
%      MagickBooleanType IdentifyImageMonochrome(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    bilevel;

  const PixelPacket
    *p;

  ssize_t
    x,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /*
    Scan every pixel, stopping at the first non-monochrome one.
  */
  bilevel=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelMonochrome(p) == MagickFalse)
        {
          bilevel=MagickFalse;
          break;
        }
      p++;
    }
    if (bilevel == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  return(bilevel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I d e n t i f y   I m a g e   T y p e                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageType() returns the potential type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  To ensure the image type matches its potential, use SetImageType():
%
%      (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
%  The format of the IdentifyImageType method is:
%
%      ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageType(const Image *image,
  ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    CMYK images are color separations regardless of pixel content.
  */
  if (image->colorspace == CMYKColorspace)
    {
      if (image->matte == MagickFalse)
        return(ColorSeparationType);
      return(ColorSeparationMatteType);
    }
  /*
    Unlike GetImageType(), these Identify* predicates inspect pixels rather
    than trusting image->type.  Cascade from strongest to weakest class.
  */
  if (IdentifyImageMonochrome(image,exception) != MagickFalse)
    return(BilevelType);
  if (IdentifyImageGray(image,exception) != UndefinedType)
    {
      if (image->matte != MagickFalse)
        return(GrayscaleMatteType);
      return(GrayscaleType);
    }
  if (IdentifyPaletteImage(image,exception) != MagickFalse)
    {
      if (image->matte != MagickFalse)
        return(PaletteMatteType);
      return(PaletteType);
    }
  if (image->matte != MagickFalse)
    return(TrueColorMatteType);
  return(TrueColorType);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I s   G r a y   I m a g e                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsGrayImage() returns MagickTrue if the type of the image is grayscale or
%  bi-level.
%
%  The format of the IsGrayImage method is:
%
%      MagickBooleanType IsGrayImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsGrayImage(const Image *image,
  ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  magick_unreferenced(exception);
  /*
    A pure type check: no pixels are inspected (see IdentifyImageGray()
    for the pixel-scanning variant).
  */
  return(((image->type == BilevelType) || (image->type == GrayscaleType) ||
    (image->type == GrayscaleMatteType)) ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I s   M o n o c h r o m e   I m a g e                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsMonochromeImage() returns MagickTrue if type of the image is bi-level.
%
%  The format of the IsMonochromeImage method is:
%
%      MagickBooleanType IsMonochromeImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsMonochromeImage(const Image *image,
  ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  magick_unreferenced(exception);
  /*
    A pure type check: no pixels are inspected (see
    IdentifyImageMonochrome() for the pixel-scanning variant).
  */
  return(image->type == BilevelType ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I s   O p a q u e   I m a g e                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsOpaqueImage() returns MagickTrue if none of the pixels in the image have
%  an opacity value other than opaque (0).
%
%  The format of the IsOpaqueImage method is:
%
%      MagickBooleanType IsOpaqueImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsOpaqueImage(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    opaque;

  const PixelPacket
    *p;

  ssize_t
    x,
    y;

  /*
    Scan every pixel; the image is opaque unless some pixel carries a
    non-opaque opacity value (or a pixel row cannot be read).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->matte == MagickFalse)
    return(MagickTrue);  /* no alpha channel at all */
  opaque=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        opaque=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelOpacity(p) != OpaqueOpacity)
        {
          opaque=MagickFalse;
          break;
        }
      p++;
    }
    if (opaque == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  return(opaque);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C h a n n e l D e p t h                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageChannelDepth() sets the depth of the image.
%
%  The format of the SetImageChannelDepth method is:
%
%      MagickBooleanType SetImageDepth(Image *image,const size_t depth)
%      MagickBooleanType SetImageChannelDepth(Image *image,
%        const ChannelType channel,const size_t depth)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o depth: the image depth.
%
*/

MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth)
{
  /* Convenience wrapper: apply the depth to all channels. */
  return(SetImageChannelDepth(image,CompositeChannels,depth));
}

MagickExport MagickBooleanType SetImageChannelDepth(Image *image,
  const ChannelType channel,const size_t depth)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    Nothing to quantize when the requested depth is at least the compiled-in
    quantum depth: pixels already carry full precision.
  */
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      image->depth=depth;
      return(MagickTrue);
    }
  range=GetQuantumRange(depth);
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        i;

      /* Palette image: reduce the colormap entries in place. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel((MagickRealType) image->colormap[i].red),range),range);
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel((MagickRealType) image->colormap[i].green),range),range);
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel((MagickRealType) image->colormap[i].blue),range),range);
        if ((channel & OpacityChannel) != 0)
          image->colormap[i].opacity=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel((MagickRealType) image->colormap[i].opacity),range),
            range);
      }
    }
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  DisableMSCWarning(4127)
  if (1UL*QuantumRange <= MaxMap)
  RestoreMSCWarning
    {
      Quantum
        *depth_map;

      ssize_t
        i;

      /*
        Scale pixels to desired depth, optimized with a depth map: a lookup
        table from every representable quantum to its depth-reduced value.
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        PixelPacket
          *magick_restrict q;

        ssize_t
          x;

        if (status == MagickFalse)
          continue;  /* another row already failed; skip remaining rows */
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,depth_map[ScaleQuantumToMap(GetPixelRed(q))]);
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,depth_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,depth_map[ScaleQuantumToMap(GetPixelBlue(q))]);
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            SetPixelOpacity(q,depth_map[ScaleQuantumToMap(
              GetPixelOpacity(q))]);
          q++;
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth (general path, no lookup table; used for
    HDRI builds or when the quantum range exceeds the map size).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          (MagickRealType) GetPixelRed(q)),range),range));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          (MagickRealType) GetPixelGreen(q)),range),range));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          (MagickRealType) GetPixelBlue(q)),range),range));
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        SetPixelOpacity(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          (MagickRealType) GetPixelOpacity(q)),range),range));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e T y p e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageType() sets the type of image.  Choose from these types:
%
%      BilevelType, GrayscaleType, GrayscaleMatteType, PaletteType,
%      PaletteMatteType, TrueColorType, TrueColorMatteType,
%      ColorSeparationType, ColorSeparationMatteType, OptimizeType
%
%  The format of the SetImageType method is:
%
%      MagickBooleanType SetImageType(Image *image,const ImageType type)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: Image type.
%
*/
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type)
{
  const char
    *artifact;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  image_info=AcquireImageInfo();
  image_info->dither=image->dither;
  /* A per-image "dither" artifact overrides the image's dither setting. */
  artifact=GetImageArtifact(image,"dither");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"dither",artifact);
  switch (type)
  {
    case BilevelType:
    {
      /* Gray, normalized, then quantized down to two colors; alpha dropped. */
      status=TransformImageColorspace(image,GRAYColorspace);
      (void) NormalizeImage(image);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->number_colors=2;
      quantize_info->colorspace=GRAYColorspace;
      status=QuantizeImage(quantize_info,image);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      image->matte=MagickFalse;
      break;
    }
    case GrayscaleType:
    {
      status=TransformImageColorspace(image,GRAYColorspace);
      image->matte=MagickFalse;
      break;
    }
    case GrayscaleMatteType:
    {
      status=TransformImageColorspace(image,GRAYColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case PaletteType:
    {
      status=TransformImageColorspace(image,sRGBColorspace);
      /* Only quantize when not already palette-sized. */
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=256;
          status=QuantizeImage(quantize_info,image);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->matte=MagickFalse;
      break;
    }
    case PaletteBilevelMatteType:
    {
      /* Palette image with the alpha channel thresholded to on/off. */
      status=TransformImageColorspace(image,sRGBColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      (void) BilevelImageChannel(image,AlphaChannel,(double) QuantumRange/2.0);
      quantize_info=AcquireQuantizeInfo(image_info);
      status=QuantizeImage(quantize_info,image);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteMatteType:
    {
      status=TransformImageColorspace(image,sRGBColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->colorspace=TransparentColorspace;
      status=QuantizeImage(quantize_info,image);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      status=TransformImageColorspace(image,sRGBColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      image->matte=MagickFalse;
      break;
    }
    case TrueColorMatteType:
    {
      status=TransformImageColorspace(image,sRGBColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case ColorSeparationType:
    {
      status=TransformImageColorspace(image,CMYKColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      image->matte=MagickFalse;
      break;
    }
    case ColorSeparationMatteType:
    {
      status=TransformImageColorspace(image,CMYKColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;  /* nothing to transform */
  }
  image_info=DestroyImageInfo(image_info);
  if (status == MagickFalse)
    return(MagickFalse);
  /* Only record the new type after every transform step succeeded. */
  image->type=type;
  return(MagickTrue);
}
/* ==== callback.h — LLVM OpenMP runtime (OMPT) test support header ==== */
#define _BSD_SOURCE
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <inttypes.h>
#include <omp.h>
#include <ompt.h>
#include "ompt-signal.h"

// Used to detect architecture
#include "../../src/kmp_platform.h"

// Printable names indexed by the corresponding OMPT enum values.
static const char* ompt_thread_type_t_values[] = {
  NULL,
  "ompt_thread_initial",
  "ompt_thread_worker",
  "ompt_thread_other"
};

static const char* ompt_task_status_t_values[] = {
  NULL,
  "ompt_task_complete",
  "ompt_task_yield",
  "ompt_task_cancel",
  "ompt_task_others"
};

static const char* ompt_cancel_flag_t_values[] = {
  "ompt_cancel_parallel",
  "ompt_cancel_sections",
  "ompt_cancel_do",
  "ompt_cancel_taskgroup",
  "ompt_cancel_activated",
  "ompt_cancel_detected",
  "ompt_cancel_discarded_task"
};

// OMPT runtime entry points; resolved via the lookup function passed to
// ompt_initialize().
static ompt_set_callback_t ompt_set_callback;
static ompt_get_task_info_t ompt_get_task_info;
static ompt_get_thread_data_t ompt_get_thread_data;
static ompt_get_parallel_info_t ompt_get_parallel_info;
static ompt_get_unique_id_t ompt_get_unique_id;
static ompt_get_num_procs_t ompt_get_num_procs;
static ompt_get_num_places_t ompt_get_num_places;
static ompt_get_place_proc_ids_t ompt_get_place_proc_ids;
static ompt_get_place_num_t ompt_get_place_num;
static ompt_get_partition_place_nums_t ompt_get_partition_place_nums;
static ompt_get_proc_id_t ompt_get_proc_id;
static ompt_enumerate_states_t ompt_enumerate_states;
static ompt_enumerate_mutex_impls_t ompt_enumerate_mutex_impls;

// Print the parallel/task ids and frame pointers of the task at the given
// ancestor level (0 = current task).
static void print_ids(int level)
{
  ompt_frame_t* frame;
  ompt_data_t* parallel_data;
  ompt_data_t* task_data;
  int exists_task = ompt_get_task_info(level, NULL, &task_data, &frame,
                                       &parallel_data, NULL);
  if (frame)
  {
    printf("%" PRIu64 ": task level %d: parallel_id=%" PRIu64
           ", task_id=%" PRIu64 ", exit_frame=%p, reenter_frame=%p\n",
           ompt_get_thread_data()->value, level,
           exists_task ? parallel_data->value : 0,
           exists_task ? task_data->value : 0,
           frame->exit_frame, frame->enter_frame);
  }
  else
    printf("%" PRIu64 ": task level %d: parallel_id=%" PRIu64
           ", task_id=%" PRIu64 ", frame=%p\n",
           ompt_get_thread_data()->value, level,
           exists_task ? parallel_data->value : 0,
           exists_task ? task_data->value : 0, frame);
}

#define print_frame(level)\
do {\
  printf("%" PRIu64 ": __builtin_frame_address(%d)=%p\n", ompt_get_thread_data()->value, level, __builtin_frame_address(level));\
} while(0)

// clang (version 5.0 and above) adds an intermediate function call with debug
// flag (-g), so one extra frame level must be skipped in that configuration.
#if defined(TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN)
#if defined(DEBUG) && defined(__clang__) && __clang_major__ >= 5
#define print_frame_from_outlined_fn(level) print_frame(level+1)
#else
#define print_frame_from_outlined_fn(level) print_frame(level)
#endif

#if defined(__clang__) && __clang_major__ >= 5
#warning "Clang 5.0 and later add an additional wrapper for outlined functions when compiling with debug information."
#warning "Please define -DDEBUG iff you manually pass in -g to make the tests succeed!"
#endif
#endif

// This macro helps to define a label at the current position that can be used
// to get the current address in the code.
//
// For print_current_address():
//   To reliably determine the offset between the address of the label and the
//   actual return address, we insert a NOP instruction as a jump target as the
//   compiler would otherwise insert an instruction that we can't control. The
//   instruction length is target dependent and is explained below.
//
//   (The empty block between "#pragma omp ..." and the __asm__ statement is a
//   workaround for a bug in the Intel Compiler.)
#define define_ompt_label(id) \
  {} \
  __asm__("nop"); \
ompt_label_##id:

// This macro helps to get the address of a label that is inserted by the above
// macro define_ompt_label(). The address is obtained with a GNU extension
// (&&label) that has been tested with gcc, clang and icc.
#define get_ompt_label_address(id) (&& ompt_label_##id)

// This macro prints the exact address that a previously called runtime
// function returns to.
#define print_current_address(id) \
  define_ompt_label(id) \
  print_possible_return_addresses(get_ompt_label_address(id))

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// On X86 the NOP instruction is 1 byte long. In addition, the compiler inserts
// a MOV instruction for non-void runtime functions which is 3 bytes long.
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p for non-void functions\n", \
         ompt_get_thread_data()->value, ((char *)addr) - 1, ((char *)addr) - 4)
#elif KMP_ARCH_PPC64
// On Power the NOP instruction is 4 bytes long. In addition, the compiler
// inserts an LD instruction which accounts for another 4 bytes. In contrast to
// X86 this instruction is always there, even for void runtime functions.
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p\n", ompt_get_thread_data()->value, \
         ((char *)addr) - 8)
#elif KMP_ARCH_AARCH64
// On AArch64 the NOP instruction is 4 bytes long, can be followed by inserted
// store instruction (another 4 bytes long).
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \
         ((char *)addr) - 4, ((char *)addr) - 8)
#else
#error Unsupported target architecture, cannot determine address offset!
#endif

// This macro performs a somewhat similar job to print_current_address(),
// except that it discards a certain number of nibbles from the address and
// only prints the most significant bits / nibbles. This can be used for cases
// where the return address can only be approximated.
//
// To account for overflows (ie the most significant bits / nibbles have just
// changed as we are a few bytes above the relevant power of two) the addresses
// of the "current" and of the "previous block" are printed.
#define print_fuzzy_address(id) \
  define_ompt_label(id) \
  print_fuzzy_address_blocks(get_ompt_label_address(id))

// If you change this define you need to adapt all capture patterns in the
// tests to include or discard the new number of nibbles!
#define FUZZY_ADDRESS_DISCARD_NIBBLES 2
#define FUZZY_ADDRESS_DISCARD_BYTES (1 << ((FUZZY_ADDRESS_DISCARD_NIBBLES) * 4))
#define print_fuzzy_address_blocks(addr) \
  printf("%" PRIu64 ": fuzzy_address=0x%" PRIx64 " or 0x%" PRIx64 " (%p)\n", \
         ompt_get_thread_data()->value, \
         ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES - 1, \
         ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES, addr)

// Render the OMPT task-type bit mask as a string in 'buffer'.
// NOTE(review): assumes the caller's buffer is large enough for all flag
// names combined — confirm at call sites.
static void format_task_type(int type, char* buffer)
{
  char* progress = buffer;
  if(type & ompt_task_initial)
    progress += sprintf(progress, "ompt_task_initial");
  if(type & ompt_task_implicit)
    progress += sprintf(progress, "ompt_task_implicit");
  if(type & ompt_task_explicit)
    progress += sprintf(progress, "ompt_task_explicit");
  if(type & ompt_task_target)
    progress += sprintf(progress, "ompt_task_target");
  if(type & ompt_task_undeferred)
    progress += sprintf(progress, "|ompt_task_undeferred");
  if(type & ompt_task_untied)
    progress += sprintf(progress, "|ompt_task_untied");
  if(type & ompt_task_final)
    progress += sprintf(progress, "|ompt_task_final");
  if(type & ompt_task_mergeable)
    progress += sprintf(progress, "|ompt_task_mergeable");
  if(type & ompt_task_merged)
    progress += sprintf(progress, "|ompt_task_merged");
}

// Logs the start of a wait on a lock/critical/atomic/ordered construct.
// The exact output text is matched by the test suite — do not change it.
static void
on_ompt_callback_mutex_acquire(
  ompt_mutex_kind_t kind,
  unsigned int hint,
  unsigned int impl,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(kind)
  {
    case ompt_mutex_lock:
      printf("%" PRIu64 ": ompt_event_wait_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_nest_lock:
      printf("%" PRIu64 ": ompt_event_wait_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_critical:
      printf("%" PRIu64 ": ompt_event_wait_critical: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_atomic:
      printf("%" PRIu64 ": ompt_event_wait_atomic: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_ordered:
      printf("%" PRIu64 ": ompt_event_wait_ordered: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    default:
      break;
  }
}

// Logs a successful mutex acquisition.
static void
on_ompt_callback_mutex_acquired(
  ompt_mutex_kind_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(kind)
  {
    case ompt_mutex_lock:
      printf("%" PRIu64 ": ompt_event_acquired_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_nest_lock:
      printf("%" PRIu64 ": ompt_event_acquired_nest_lock_first: wait_id=%" PRIu64 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_critical:
      printf("%" PRIu64 ": ompt_event_acquired_critical: wait_id=%" PRIu64 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_atomic:
      printf("%" PRIu64 ": ompt_event_acquired_atomic: wait_id=%" PRIu64 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_ordered:
      printf("%" PRIu64 ": ompt_event_acquired_ordered: wait_id=%" PRIu64 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    default:
      break;
  }
}

// Logs a mutex release.
static void
on_ompt_callback_mutex_released(
  ompt_mutex_kind_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(kind)
  {
    case ompt_mutex_lock:
      printf("%" PRIu64 ": ompt_event_release_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_nest_lock:
      printf("%" PRIu64 ": ompt_event_release_nest_lock_last: wait_id=%" PRIu64 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_critical:
      printf("%" PRIu64 ": ompt_event_release_critical: wait_id=%" PRIu64 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_atomic:
      printf("%" PRIu64 ": ompt_event_release_atomic: wait_id=%" PRIu64 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_ordered:
      printf("%" PRIu64 ": ompt_event_release_ordered: wait_id=%" PRIu64 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    default:
      break;
  }
}

// Logs re-acquisition / partial release of a nested lock.
static void
on_ompt_callback_nest_lock(
    ompt_scope_endpoint_t endpoint,
    ompt_wait_id_t wait_id,
    const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      printf("%" PRIu64 ": ompt_event_acquired_nest_lock_next: wait_id=%" PRIu64 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_scope_end:
      printf("%" PRIu64 ": ompt_event_release_nest_lock_prev: wait_id=%" PRIu64 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
  }
}

// Logs entry/exit of barrier, taskwait and taskgroup regions.  At the end of
// a region parallel_data may already be NULL, hence the guarded access.
static void
on_ompt_callback_sync_region(
  ompt_sync_region_kind_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(kind)
      {
        case ompt_sync_region_barrier:
          printf("%" PRIu64 ": ompt_event_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          print_ids(0);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ": ompt_event_taskwait_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ": ompt_event_taskgroup_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
      }
      break;
    case ompt_scope_end:
      switch(kind)
      {
        case ompt_sync_region_barrier:
          printf("%" PRIu64 ": ompt_event_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data)?parallel_data->value:0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ": ompt_event_taskwait_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data)?parallel_data->value:0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ": ompt_event_taskgroup_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data)?parallel_data->value:0, task_data->value,
                 codeptr_ra);
          break;
      }
      break;
  }
}

// Same as on_ompt_callback_sync_region(), but for the wait phase inside the
// synchronization construct.
static void
on_ompt_callback_sync_region_wait(
  ompt_sync_region_kind_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(kind)
      {
        case ompt_sync_region_barrier:
          printf("%" PRIu64 ": ompt_event_wait_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ": ompt_event_wait_taskwait_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ": ompt_event_wait_taskgroup_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
      }
      break;
    case ompt_scope_end:
      switch(kind)
      {
        case ompt_sync_region_barrier:
          printf("%" PRIu64 ": ompt_event_wait_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data)?parallel_data->value:0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ": ompt_event_wait_taskwait_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data)?parallel_data->value:0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ": ompt_event_wait_taskgroup_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data)?parallel_data->value:0, task_data->value,
                 codeptr_ra);
          break;
      }
      break;
  }
}

// Logs an OpenMP flush.
static void
on_ompt_callback_flush(
    ompt_data_t *thread_data,
    const void *codeptr_ra)
{
  printf("%" PRIu64 ": ompt_event_flush: codeptr_ra=%p\n",
         thread_data->value, codeptr_ra);
}

// Logs a cancellation event, decoding the construct flag and the state flag.
// NOTE(review): if 'flags' carries none of the construct bits (or none of the
// state bits), the corresponding *_flag_value stays uninitialized when
// printed — confirm that the runtime always sets one bit of each group.
static void
on_ompt_callback_cancel(
    ompt_data_t *task_data,
    int flags,
    const void *codeptr_ra)
{
  const char* first_flag_value;
  const char* second_flag_value;
  if(flags & ompt_cancel_parallel)
    first_flag_value = ompt_cancel_flag_t_values[0];
  else if(flags & ompt_cancel_sections)
    first_flag_value = ompt_cancel_flag_t_values[1];
  else if(flags & ompt_cancel_do)
    first_flag_value = ompt_cancel_flag_t_values[2];
  else if(flags & ompt_cancel_taskgroup)
    first_flag_value = ompt_cancel_flag_t_values[3];

  if(flags & ompt_cancel_activated)
    second_flag_value = ompt_cancel_flag_t_values[4];
  else if(flags & ompt_cancel_detected)
    second_flag_value = ompt_cancel_flag_t_values[5];
  else if(flags & ompt_cancel_discarded_task)
    second_flag_value = ompt_cancel_flag_t_values[6];

  printf("%" PRIu64 ": ompt_event_cancel: task_data=%" PRIu64 ", flags=%s|%s=%" PRIu32 ", codeptr_ra=%p\n",
         ompt_get_thread_data()->value, task_data->value,
         first_flag_value, second_flag_value, flags, codeptr_ra);
}

// Logs a worker thread entering/leaving the idle state.
static void
on_ompt_callback_idle(
  ompt_scope_endpoint_t endpoint)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      printf("%" PRIu64 ": ompt_event_idle_begin:\n", ompt_get_thread_data()->value);
      break;
    case ompt_scope_end:
      printf("%" PRIu64 ": ompt_event_idle_end:\n", ompt_get_thread_data()->value);
      break;
  }
}

// Logs implicit-task begin/end; assigns the task id at begin.
static void
on_ompt_callback_implicit_task(
    ompt_scope_endpoint_t endpoint,
    ompt_data_t *parallel_data,
    ompt_data_t *task_data,
    unsigned int team_size,
    unsigned int thread_num)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      if(task_data->ptr)
        printf("%s\n", "0: task_data initially not null");
      task_data->value = ompt_get_unique_id();
      printf("%" PRIu64 ": ompt_event_implicit_task_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", thread_num=%" PRIu32 "\n",
             ompt_get_thread_data()->value, parallel_data->value,
             task_data->value, team_size, thread_num);
      break;
    case ompt_scope_end:
      // parallel_data may already be gone at region end.
      printf("%" PRIu64 ": ompt_event_implicit_task_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", thread_num=%" PRIu32 "\n",
             ompt_get_thread_data()->value,
             (parallel_data)?parallel_data->value:0, task_data->value,
             team_size, thread_num);
      break;
  }
}

// Logs lock initialization (plain and nested locks only).
static void
on_ompt_callback_lock_init(
  ompt_mutex_kind_t kind,
  unsigned int hint,
  unsigned int impl,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(kind)
  {
    case ompt_mutex_lock:
      printf("%" PRIu64 ": ompt_event_init_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_nest_lock:
      printf("%" PRIu64 ": ompt_event_init_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    default:
      break;
  }
}

// Logs lock destruction (plain and nested locks only).
static void
on_ompt_callback_lock_destroy(
  ompt_mutex_kind_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(kind)
  {
    case ompt_mutex_lock:
      printf("%" PRIu64 ": ompt_event_destroy_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_nest_lock:
      printf("%" PRIu64 ": ompt_event_destroy_nest_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n",
             ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    default:
      break;
  }
}

// Logs begin/end of worksharing constructs (loop, sections, single,
// distribute, taskloop).  workshare is not implemented by the runtime yet.
static void
on_ompt_callback_work(
  ompt_work_type_t wstype,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  uint64_t count,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(wstype)
      {
        case ompt_work_loop:
          printf("%" PRIu64 ": ompt_event_loop_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_sections:
          printf("%" PRIu64 ": ompt_event_sections_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_executor:
          printf("%" PRIu64 ": ompt_event_single_in_block_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_other:
          printf("%" PRIu64 ": ompt_event_single_others_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_workshare:
          //impl
          break;
        case ompt_work_distribute:
          printf("%" PRIu64 ": ompt_event_distribute_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_taskloop:
          //impl
          printf("%" PRIu64 ": ompt_event_taskloop_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
      }
      break;
    case ompt_scope_end:
      switch(wstype)
      {
        case ompt_work_loop:
          printf("%" PRIu64 ": ompt_event_loop_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_sections:
          printf("%" PRIu64 ": ompt_event_sections_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_executor:
          printf("%" PRIu64 ": ompt_event_single_in_block_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_other:
          printf("%" PRIu64 ": ompt_event_single_others_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_workshare:
          //impl
          break;
        case ompt_work_distribute:
          printf("%" PRIu64 ": ompt_event_distribute_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_taskloop:
          //impl
          printf("%" PRIu64 ": ompt_event_taskloop_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
      }
      break;
  }
}

// Logs begin/end of a master region.
static void
on_ompt_callback_master(
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      printf("%" PRIu64 ": ompt_event_master_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
             ompt_get_thread_data()->value, parallel_data->value,
             task_data->value, codeptr_ra);
      break;
    case ompt_scope_end:
      printf("%" PRIu64 ": ompt_event_master_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
             ompt_get_thread_data()->value, parallel_data->value,
             task_data->value, codeptr_ra);
      break;
  }
}

// Assigns the parallel-region id and logs the region start together with the
// encountering task's frame pointers.
static void
on_ompt_callback_parallel_begin(
  ompt_data_t *encountering_task_data,
  const ompt_frame_t *encountering_task_frame,
  ompt_data_t* parallel_data,
  uint32_t requested_team_size,
  ompt_invoker_t invoker,
  const void *codeptr_ra)
{
  if(parallel_data->ptr)
    printf("0: parallel_data initially not null\n");
  parallel_data->value = ompt_get_unique_id();
  printf("%" PRIu64 ": ompt_event_parallel_begin: parent_task_id=%" PRIu64 ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, parallel_id=%" PRIu64 ", requested_team_size=%" PRIu32 ", codeptr_ra=%p, invoker=%d\n",
         ompt_get_thread_data()->value, encountering_task_data->value,
         encountering_task_frame->exit_frame,
         encountering_task_frame->enter_frame, parallel_data->value,
         requested_team_size, codeptr_ra, invoker);
}

// Logs the end of a parallel region.
static void
on_ompt_callback_parallel_end(
  ompt_data_t *parallel_data,
  ompt_data_t *encountering_task_data,
  ompt_invoker_t invoker,
  const void *codeptr_ra)
{
  printf("%" PRIu64 ": ompt_event_parallel_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", invoker=%d, codeptr_ra=%p\n",
         ompt_get_thread_data()->value, parallel_data->value,
         encountering_task_data->value, invoker, codeptr_ra);
}

// Assigns a new task id and logs task creation (continued below).
static void
on_ompt_callback_task_create(
    ompt_data_t *encountering_task_data,
    const ompt_frame_t *encountering_task_frame,
    ompt_data_t* new_task_data,
    int type,
    int has_dependences,
    const void *codeptr_ra)
{
  if(new_task_data->ptr)
    printf("0: new_task_data initially not null\n");
  new_task_data->value = ompt_get_unique_id();
char buffer[2048]; format_task_type(type, buffer); //there is no parallel_begin callback for implicit parallel region //thus it is initialized in initial task if(type & ompt_task_initial) { ompt_data_t *parallel_data; ompt_get_parallel_info(0, &parallel_data, NULL); if(parallel_data->ptr) printf("%s\n", "0: parallel_data initially not null"); parallel_data->value = ompt_get_unique_id(); } printf("%" PRIu64 ": ompt_event_task_create: parent_task_id=%" PRIu64 ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, new_task_id=%" PRIu64 ", codeptr_ra=%p, task_type=%s=%d, has_dependences=%s\n", ompt_get_thread_data()->value, encountering_task_data ? encountering_task_data->value : 0, encountering_task_frame ? encountering_task_frame->exit_frame : NULL, encountering_task_frame ? encountering_task_frame->enter_frame : NULL, new_task_data->value, codeptr_ra, buffer, type, has_dependences ? "yes" : "no"); } static void on_ompt_callback_task_schedule( ompt_data_t *first_task_data, ompt_task_status_t prior_task_status, ompt_data_t *second_task_data) { printf("%" PRIu64 ": ompt_event_task_schedule: first_task_id=%" PRIu64 ", second_task_id=%" PRIu64 ", prior_task_status=%s=%d\n", ompt_get_thread_data()->value, first_task_data->value, second_task_data->value, ompt_task_status_t_values[prior_task_status], prior_task_status); if(prior_task_status == ompt_task_complete) { printf("%" PRIu64 ": ompt_event_task_end: task_id=%" PRIu64 "\n", ompt_get_thread_data()->value, first_task_data->value); } } static void on_ompt_callback_task_dependences( ompt_data_t *task_data, const ompt_task_dependence_t *deps, int ndeps) { printf("%" PRIu64 ": ompt_event_task_dependences: task_id=%" PRIu64 ", deps=%p, ndeps=%d\n", ompt_get_thread_data()->value, task_data->value, (void *)deps, ndeps); } static void on_ompt_callback_task_dependence( ompt_data_t *first_task_data, ompt_data_t *second_task_data) { printf("%" PRIu64 ": ompt_event_task_dependence_pair: first_task_id=%" PRIu64 ", 
second_task_id=%" PRIu64 "\n", ompt_get_thread_data()->value, first_task_data->value, second_task_data->value); } static void on_ompt_callback_thread_begin( ompt_thread_type_t thread_type, ompt_data_t *thread_data) { if(thread_data->ptr) printf("%s\n", "0: thread_data initially not null"); thread_data->value = ompt_get_unique_id(); printf("%" PRIu64 ": ompt_event_thread_begin: thread_type=%s=%d, thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, ompt_thread_type_t_values[thread_type], thread_type, thread_data->value); } static void on_ompt_callback_thread_end( ompt_data_t *thread_data) { printf("%" PRIu64 ": ompt_event_thread_end: thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, thread_data->value); } static int on_ompt_callback_control_tool( uint64_t command, uint64_t modifier, void *arg, const void *codeptr_ra) { ompt_frame_t* omptTaskFrame; ompt_get_task_info(0, NULL, (ompt_data_t**) NULL, &omptTaskFrame, NULL, NULL); printf("%" PRIu64 ": ompt_event_control_tool: command=%" PRIu64 ", modifier=%" PRIu64 ", arg=%p, codeptr_ra=%p, current_task_frame.exit=%p, current_task_frame.reenter=%p \n", ompt_get_thread_data()->value, command, modifier, arg, codeptr_ra, omptTaskFrame->exit_frame, omptTaskFrame->enter_frame); return 0; //success } #define register_callback_t(name, type) \ do{ \ type f_##name = &on_##name; \ if (ompt_set_callback(name, (ompt_callback_t)f_##name) == \ ompt_set_never) \ printf("0: Could not register callback '" #name "'\n"); \ }while(0) #define register_callback(name) register_callback_t(name, name##_t) int ompt_initialize( ompt_function_lookup_t lookup, ompt_data_t *tool_data) { ompt_set_callback = (ompt_set_callback_t) lookup("ompt_set_callback"); ompt_get_task_info = (ompt_get_task_info_t) lookup("ompt_get_task_info"); ompt_get_thread_data = (ompt_get_thread_data_t) lookup("ompt_get_thread_data"); ompt_get_parallel_info = (ompt_get_parallel_info_t) lookup("ompt_get_parallel_info"); ompt_get_unique_id = (ompt_get_unique_id_t) 
lookup("ompt_get_unique_id"); ompt_get_num_procs = (ompt_get_num_procs_t) lookup("ompt_get_num_procs"); ompt_get_num_places = (ompt_get_num_places_t) lookup("ompt_get_num_places"); ompt_get_place_proc_ids = (ompt_get_place_proc_ids_t) lookup("ompt_get_place_proc_ids"); ompt_get_place_num = (ompt_get_place_num_t) lookup("ompt_get_place_num"); ompt_get_partition_place_nums = (ompt_get_partition_place_nums_t) lookup("ompt_get_partition_place_nums"); ompt_get_proc_id = (ompt_get_proc_id_t) lookup("ompt_get_proc_id"); ompt_enumerate_states = (ompt_enumerate_states_t) lookup("ompt_enumerate_states"); ompt_enumerate_mutex_impls = (ompt_enumerate_mutex_impls_t) lookup("ompt_enumerate_mutex_impls"); register_callback(ompt_callback_mutex_acquire); register_callback_t(ompt_callback_mutex_acquired, ompt_callback_mutex_t); register_callback_t(ompt_callback_mutex_released, ompt_callback_mutex_t); register_callback(ompt_callback_nest_lock); register_callback(ompt_callback_sync_region); register_callback_t(ompt_callback_sync_region_wait, ompt_callback_sync_region_t); register_callback(ompt_callback_control_tool); register_callback(ompt_callback_flush); register_callback(ompt_callback_cancel); register_callback(ompt_callback_idle); register_callback(ompt_callback_implicit_task); register_callback_t(ompt_callback_lock_init, ompt_callback_mutex_acquire_t); register_callback_t(ompt_callback_lock_destroy, ompt_callback_mutex_t); register_callback(ompt_callback_work); register_callback(ompt_callback_master); register_callback(ompt_callback_parallel_begin); register_callback(ompt_callback_parallel_end); register_callback(ompt_callback_task_create); register_callback(ompt_callback_task_schedule); register_callback(ompt_callback_task_dependences); register_callback(ompt_callback_task_dependence); register_callback(ompt_callback_thread_begin); register_callback(ompt_callback_thread_end); printf("0: NULL_POINTER=%p\n", (void*)NULL); return 1; //success } void ompt_finalize(ompt_data_t 
*tool_data) { printf("0: ompt_event_runtime_shutdown\n"); } ompt_start_tool_result_t* ompt_start_tool( unsigned int omp_version, const char *runtime_version) { static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0}; return &ompt_start_tool_result; }
GB_unaryop__lnot_bool_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_bool_int8 // op(A') function: GB_tran__lnot_bool_int8 // C type: bool // A type: int8_t // cast: bool cij = (bool) aij // unaryop: cij = !aij #define GB_ATYPE \ int8_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_bool_int8 ( bool *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_bool_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unaryop__identity_bool_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_bool_uint32
// op(A') function:  GB_tran__identity_bool_uint32

// C type:   bool
// A type:   uint32_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity; the cast to bool does the real work)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    bool z = (bool) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise over the anz entries of Ax, parallelized with a static
// schedule across nthreads threads.
GrB_Info GB_unop__identity_bool_uint32
(
    bool *restrict Cx,            // output array, anz entries
    const uint32_t *restrict Ax,  // input array, anz entries
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unaryop_transpose.c, instantiated
// here with the macros above (phase 2 of 2).
GrB_Info GB_tran__identity_bool_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ellipticFusedResidualAndNorm.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ // r = b - Ax // (r,r) extern "C" void fusedResidualAndNorm(const dlong & Nblocks, const dlong & N, const dlong & offset, const dfloat* __restrict__ weights, const dfloat* __restrict__ b_vec, const dfloat* __restrict__ Ax, dfloat* __restrict__ r, dfloat* __restrict__ reduction) { dfloat rdotr = 0.0; #pragma omp parallel for collapse(2) for(int fld = 0 ; fld < p_eNfields; ++fld){ for(int id = 0 ; id < N; ++id){ const dfloat rnew = b_vec[id + fld * offset] - Ax[id + fld * offset]; r[id + fld * offset] = rnew; rdotr += rnew * rnew * weights[id]; } } reduction[0] = rdotr; }
omp-matmat-mult.c
/***************************************************************************** Example : omp-matmat-mult.c Objective : Write an OpenMP Program of Matrix Matrix Multiplication and measure the performance. This example demonstrates the use of PARALLEL for Directive and Private clause Input : a) Number of threads b) Size of matrices (numofrows and noofcols of A and noofrows and Noofcols of B ) Output : Each thread computes the matrix matrix multiplication and master thread prints the final result matrix and also time taken for the computation. Author : RarchK *********************************************************************************/ #include <stdio.h> #include <sys/time.h> #include <omp.h> #include <stdlib.h> /* Main Program */ main(int argc,char **argv) { int NoofRows_A, NoofCols_A, NoofRows_B, NoofCols_B, i,j, k,Noofthreads; float **Matrix_A, **Matrix_B, **Result, **Checkoutput, flops; struct timeval TimeValue_Start; struct timezone TimeZone_Start; struct timeval TimeValue_Final; struct timezone TimeZone_Final; long time_start, time_end; double time_overhead; printf("\n\t\t---------------------------------------------------------------------------"); printf("\n\t\t Email : RarchK"); printf("\n\t\t---------------------------------------------------------------------------"); printf("\n\t\t Objective : Dense Matrix Computations (Floating Point Operations)\n "); printf("\n\t\t Matrix into Matrix Multiplication using "); printf("\n\t\t OpenMP one PARALLEL for directive and Private Clause;"); printf("\n\t\t..........................................................................\n"); /* Checking for command line arguments */ if( argc != 6 ){ printf("\t\t Very Few Arguments\n "); printf("\t\t Syntax : exec <Threads> <NoOfRows A> <NoOfCols A> <NoOfRows B> <NoOfCols B>\n"); exit(-1); } Noofthreads=atoi(argv[1]); if ((Noofthreads!=1) && (Noofthreads!=2) && (Noofthreads!=4) && (Noofthreads!=8) && (Noofthreads!= 16) ) { printf("\n Number of threads should be 1,2,4,8 
or 16 for the execution of program. \n\n"); exit(-1); } NoofRows_A=atoi(argv[2]); NoofCols_A=atoi(argv[3]); NoofRows_B=atoi(argv[4]); NoofCols_B=atoi(argv[5]); /* printf("\n\t\t Read The Matrix Size Noofrows and Colums of Matrix A and B \n"); scanf("%d%d%d%d", &NoofRows_A, &NoofCols_A, &NoofRows_B, &NoofCols_B);*/ printf("\n\t\t Threads : %d ",Noofthreads); printf("\n\t\t Matrix A Size : %d X %d ", NoofRows_A,NoofCols_A); printf("\n\t\t Matrix B Size : %d X %d ", NoofRows_B,NoofCols_B); /* The NoofRows And NoofCols Should Be Of Positive integer */ if (NoofRows_A <= 0 || NoofCols_A <= 0 || NoofRows_B <= 0 || NoofCols_B <= 0) { printf("\n\t\t The NoofRows And NoofCols Should Be Of Positive Sign\n"); exit(1); } /* Checking For Necessary Condition */ if (NoofCols_A != NoofRows_B) { printf("\n\t\t Matrix Matrix Computation Is Not Possible \n"); exit(1); } /* Dynamic memory allocation and initialization of Matrix_A Elements */ Matrix_A = (float **) malloc(sizeof(float *) * NoofRows_A); for (i = 0; i < NoofRows_A; i++) { Matrix_A[i] = (float *) malloc(sizeof(float) * NoofCols_A); for (j = 0; j < NoofCols_A; j++) Matrix_A[i][j] = i + j; } /* Matrix_B Elements */ Matrix_B = (float **) malloc(sizeof(float *) * NoofRows_B); for (i = 0; i < NoofRows_B; i++) { Matrix_B[i] = (float *) malloc(sizeof(float) * NoofCols_B); for (j = 0; j < NoofCols_B; j++) Matrix_B[i][j] = i + j; } /* Dynamic Memory Allocation */ Result = (float **) malloc(sizeof(float *) * NoofRows_A); Checkoutput = (float **) malloc(sizeof(float *) * NoofRows_A); for (i = 0; i < NoofRows_A; i++) { Result[i] = (float *) malloc(sizeof(float) * NoofCols_B); Checkoutput[i] = (float *) malloc(sizeof(float) * NoofCols_B); for (j = 0; j < NoofCols_B; j++) { Result[i][j] = 0.0; Checkoutput[i][j] = 0.0; } } gettimeofday(&TimeValue_Start, &TimeZone_Start); omp_set_num_threads(Noofthreads); /* OpenMP Parallel For Directive : Fork a team of threads giving them their own copies of variables */ #pragma omp parallel for 
private(j,k) for (i = 0; i < NoofRows_A; i = i + 1) for (j = 0; j < NoofCols_B; j = j + 1) for (k = 0; k < NoofCols_A; k = k + 1) Result[i][j] = Result[i][j] + Matrix_A[i][k] * Matrix_B[k][j]; /* All threads join master thread and disband */ gettimeofday(&TimeValue_Final, &TimeZone_Final); /* Calculate the time taken for the computation */ time_start = TimeValue_Start.tv_sec * 1000000 + TimeValue_Start.tv_usec; time_end = TimeValue_Final.tv_sec * 1000000 + TimeValue_Final.tv_usec; time_overhead = (time_end - time_start)/1000000.0; /* Serial Computation */ for (i = 0; i < NoofRows_A; i = i + 1) for (j = 0; j < NoofCols_B; j = j + 1) for (k = 0; k < NoofCols_A; k = k + 1) Checkoutput[i][j] = Checkoutput[i][j] + Matrix_A[i][k] * Matrix_B[k][j]; for (i = 0; i < NoofRows_A; i = i + 1) for (j = 0; j < NoofCols_B; j = j + 1) if (Checkoutput[i][j] == Result[i][j]) continue; else { printf("\n\t\t There Is A Difference In Parallel Calculation \n"); exit(1); } printf("\n\n\t\t Matrix into Matrix Multiplication using Parallel for directive......Done \n"); printf("\n\t\t Time in Seconds (T) : %lf Seconds \n",time_overhead); printf("\n\t\t ( T represents the Time taken for computation )"); printf("\n\t\t..........................................................................\n"); /* Freeing Allocated Memory */ free(Matrix_A); free(Matrix_B); free(Result); free(Checkoutput); }
polybench.c
/**
 * polybench.c: This file is part of the PolyBench/C 3.2 test suite.
 *
 *
 * Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
 * Web address: http://polybench.sourceforge.net
 * License: /LICENSE.OSU.txt
 */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sched.h>
#include <math.h>
#ifdef _OPENMP
# include <omp.h>
#endif

/* By default, collect PAPI counters on thread 0. */
#ifndef POLYBENCH_THREAD_MONITOR
# define POLYBENCH_THREAD_MONITOR 0
#endif

/* Total LLC cache size. By default 32+MB.. */
#ifndef POLYBENCH_CACHE_SIZE_KB
# define POLYBENCH_CACHE_SIZE_KB 32770
#endif

#include <omp.h>

int polybench_papi_counters_threadid = 0;
double polybench_program_total_flops = 0;

#ifdef POLYBENCH_PAPI
# include <papi.h>
# define POLYBENCH_MAX_NB_PAPI_COUNTERS 96
#include "papi_counters.list"
#endif

/* Timer code (gettimeofday). */
double polybench_t_start;
double polybench_t_end;
/* Timer code (RDTSC). */
unsigned long long polybench_c_start;
unsigned long long polybench_c_end;

/* Wall-clock time in seconds.  FIX: the POLYBENCH_TIME branch previously
   fell off the end of the function with no return (undefined behavior);
   restored the upstream gettimeofday-based body. */
static double rtclock()
{
#ifdef POLYBENCH_TIME
  struct timeval Tp;
  int stat;
  stat = gettimeofday(&Tp, NULL);
  if (stat != 0)
    printf("Error return from gettimeofday: %d", stat);
  return Tp.tv_sec + Tp.tv_usec * 1.0e-6;
#else
  return 0;
#endif
}

#ifdef POLYBENCH_CYCLE_ACCURATE_TIMER
#endif

/* Touch an LLC-sized buffer so benchmark timing starts with a cold cache.
   FIX: sizes the buffer from POLYBENCH_CACHE_SIZE_KB (defined above, and
   overridable) instead of a hard-coded copy of its default value; the
   mangled statement-expression assert is restored to a plain assert(). */
void polybench_flush_cache()
{
  int cs = ((POLYBENCH_CACHE_SIZE_KB * 1024) / sizeof(double));
  double *flush = (double *)(calloc(cs, sizeof(double)));
  int i;
  double tmp = 0.0;
#pragma omp parallel for private (i) reduction (+:tmp) firstprivate (cs)
  for (i = 0; i <= cs - 1; i += 1) {
    tmp += flush[i];
  }
  /* calloc zero-fills, so the reduction must stay tiny; the assert keeps
     the compiler from optimizing the flush loop away. */
  assert(tmp <= 10.0);
  free(flush);
}

#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
/* Use FIFO scheduler to limit OS interference. Program must be run as root,
   and this works only for Linux kernels. */
/* Restore to standard scheduler policy. */
#endif

#ifdef POLYBENCH_PAPI
# ifndef POLYBENCH_NO_FLUSH_CACHE
# endif
#ifdef POLYBENCH_PAPI_VERBOSE
#endif
#endif /* ! POLYBENCH_PAPI */

/* Prepare the machine for a measurement (cache flush, optional scheduler
   tweaks). */
void polybench_prepare_instruments()
{
#ifndef POLYBENCH_NO_FLUSH_CACHE
  polybench_flush_cache();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
#endif
}

/* Start the benchmark timer (also flushes the cache first). */
void polybench_timer_start()
{
  polybench_prepare_instruments();
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
  polybench_t_start = rtclock();
#else
#endif
}

/* Stop the benchmark timer. */
void polybench_timer_stop()
{
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
  polybench_t_end = rtclock();
#else
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
#endif
}

/* Print the elapsed time in seconds. */
void polybench_timer_print()
{
#ifdef POLYBENCH_GFLOPS
#else
# ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
  printf("%0.6f\n", polybench_t_end - polybench_t_start);
# else
# endif
#endif
}

/* 32-byte-aligned allocation; aborts on failure (PolyBench policy). */
static void *xmalloc(size_t num)
{
  void *nnew = (void *)0;
  int ret = posix_memalign(&nnew, 32, num);
  if (!nnew || ret) {
    fprintf(stderr, "[PolyBench] posix_memalign: cannot allocate memory");
    exit(1);
  }
  return nnew;
}

/* Allocate n elements of elt_size bytes each.
   FIX (was a FIXME): detect overflow/truncation of n * elt_size before
   allocating, instead of silently wrapping. */
void *polybench_alloc_data(unsigned long long n, int elt_size)
{
  size_t val = n;
  val *= elt_size;
  /* If the size_t product divided back does not recover n, the
     multiplication overflowed or n was truncated on this platform. */
  if (elt_size != 0 && val / (size_t) elt_size != n) {
    fprintf(stderr, "[PolyBench] allocation size overflow");
    exit(1);
  }
  void *ret = xmalloc(val);
  return ret;
}
testing_dlange.c
/**
 *
 * @file testing_dlange.c
 *
 *  PLASMA testing routines
 *  PLASMA is a software package provided by Univ. of Tennessee,
 *  Univ. of California Berkeley and Univ. of Colorado Denver
 *
 * @version 2.6.0
 * @author Emmanuel Agullo
 * @author Mathieu Faverge
 * @date 2010-11-15
 * @generated d Tue Jan  7 11:45:19 2014
 *
 **/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include <plasma.h>
#include <cblas.h>
#include <lapacke.h>
#include <core_blas.h>
#include "testing_dmain.h"

#undef COMPLEX
#define REAL

/* Test driver for PLASMA_dlange (and dlansy; dlantr/complex variants are
   compiled out): computes Max/Inf/One/Frobenius norms of a random M-by-N
   matrix with both PLASMA and LAPACK, and checks that the scaled relative
   difference is below 1.  Returns 0 on completion, -1 on bad usage.
   NOTE(review): the `norm`, `uplo`, `diag`, `normstr`, `uplostr`, `diagstr`
   tables and the USAGE/max/min macros come from testing_dmain.h. */
int testing_dlange(int argc, char **argv)
{
    /* Check for number of arguments*/
    if ( argc != 3) {
        USAGE("LANGE", "M N LDA",
              "   - M      : number of rows of matrices A and C\n"
              "   - N      : number of columns of matrices B and C\n"
              "   - LDA    : leading dimension of matrix A\n");
        return -1;
    }

    int M = atoi(argv[0]);
    int N = atoi(argv[1]);
    int LDA = atoi(argv[2]);
    int LDAxN = LDA*N;
    int n, u;
    double eps;

    double *A    = (double *)malloc(LDAxN*sizeof(double));
    /* NOTE(review): `#pragma omp register` is not standard OpenMP — it looks
       like an OmpSs/Nanos extension registering the buffer with the runtime;
       confirm the intended compiler before porting. */
    #pragma omp register ([LDA*N]A)
    double *work = (double*) malloc(max(M,N)*sizeof(double));
    int mmn = max(M,N);
    #pragma omp register ([mmn]work)
    double normplasma, normlapack, result;

    eps = LAPACKE_dlamch_work('e');

    printf("\n");
    printf("------ TESTS FOR PLASMA DLANGE ROUTINE -------  \n");
    printf("            Size of the Matrix %d by %d\n", M, N);
    printf("\n");
    printf(" The matrix A is randomly generated for each test.\n");
    printf("============\n");
    printf(" The relative machine precision (eps) is to be %e \n",eps);
    printf(" Computational tests pass if scaled residuals are less than 10.\n");

    /*----------------------------------------------------------
    *  TESTING DLANGE
    */

    /* Initialize A, B, C */
    PLASMA_dplrnt( M, N, A, LDA, 3436 );

    /* PLASMA DLANGE */
    for(n=0; n<4; n++) {
        normplasma = PLASMA_dlange(norm[n], M, N, A, LDA);
        normlapack = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(norm[n]), M, N, A, LDA, work);
        printf("Lapack %e, Plasma %e\n", normlapack, normplasma);

        result = fabs(normplasma - normlapack) / (normlapack * eps);
        /* Rescale the residual by the number of summands whose order
           can legitimately differ between the two implementations. */
        switch(norm[n]) {
        case PlasmaMaxNorm:
            /* result should be perfectly equal */
            break;
        case PlasmaInfNorm:
            /* Sum order on the line can differ */
            result = result / (double)N;
            break;
        case PlasmaOneNorm:
            /* Sum order on the column can differ */
            result = result / (double)M;
            break;
        case PlasmaFrobeniusNorm:
            /* Sum order on every element can differ */
            result = result / ((double)M * (double)N);
            break;
        }

        printf("***************************************************\n");
        if ( result < 1. ) {
            printf(" ---- TESTING DLANGE (%s)............... PASSED !\n", normstr[n]);
        }
        else {
            printf(" - TESTING DLANGE (%s)... FAILED !\n", normstr[n]);
        }
        printf("***************************************************\n");
    }

    /* Don't perform real tests while lapacke is not correct */
#ifdef COMPLEX
    /* PLASMA DLANTR */
    for(n=0; n<4; n++) {
        for(u=0; u<2; u++) {
            int d;
            for(d=0; d<2; d++) {
                normplasma = PLASMA_dlantr(norm[n], uplo[u], diag[d], M, N, A, LDA);
                normlapack = LAPACKE_dlantr_work(LAPACK_COL_MAJOR, lapack_const(norm[n]), lapack_const(uplo[u]), lapack_const(diag[d]), M, N, A, LDA, work);
                printf("Lapack %e, Plasma %e\n", normlapack, normplasma);

                result = fabs(normplasma - normlapack) / (normlapack * eps);
                switch(norm[n]) {
                case PlasmaMaxNorm:
                    /* result should be perfectly equal */
                    break;
                case PlasmaInfNorm:
                    /* Sum order on the line can differ */
                    result = result / (double)N;
                    break;
                case PlasmaOneNorm:
                    /* Sum order on the column can differ */
                    result = result / (double)M;
                    break;
                case PlasmaFrobeniusNorm:
                    /* Sum order on every element can differ */
                    result = result / ((double)M * (double)N);
                    break;
                }

                printf("***************************************************\n");
                if ( result < 1. ) {
                    printf(" ---- TESTING DLANTR (%s, %s, %s)......... PASSED !\n", normstr[n], uplostr[u], diagstr[d]);
                }
                else {
                    printf(" - TESTING DLANTR (%s, %s, %s)... FAILED !\n", normstr[n], uplostr[u], diagstr[d]);
                }
                printf("***************************************************\n");
            }
        }
    }
#endif

    /* PLASMA DLANSY */
    for(n=0; n<4; n++) {
        for(u=0; u<2; u++) {
            normplasma = PLASMA_dlansy(norm[n], uplo[u], min(M,N), A, LDA);
            normlapack = LAPACKE_dlansy_work(LAPACK_COL_MAJOR, lapack_const(norm[n]), lapack_const(uplo[u]), min(M,N), A, LDA, work);
            printf("Lapack %e, Plasma %e\n", normlapack, normplasma);

            result = fabs(normplasma - normlapack) / (normlapack * eps);
            switch(norm[n]) {
            case PlasmaMaxNorm:
                /* result should be perfectly equal */
                break;
            case PlasmaInfNorm:
                /* Sum order on the line can differ */
                result = result / (double)N;
                break;
            case PlasmaOneNorm:
                /* Sum order on the column can differ */
                result = result / (double)M;
                break;
            case PlasmaFrobeniusNorm:
                /* Sum order on every element can differ */
                result = result / ((double)M * (double)N);
                break;
            }

            printf("***************************************************\n");
            if ( result < 1. ) {
                printf(" ---- TESTING DLANSY (%s, %s)......... PASSED !\n", normstr[n], uplostr[u]);
            }
            else {
                printf(" - TESTING DLANSY (%s, %s)... FAILED !\n", normstr[n], uplostr[u]);
            }
            printf("***************************************************\n");
        }
    }

#ifdef COMPLEX
    /* PLASMA DLANSY */
    /* Make the diagonal real before testing the Hermitian norm. */
    {
        int j;
        for (j=0; j<min(M,N); j++) {
            A[j*LDA+j] -= I*cimag(A[j*LDA+j]);
        }
    }

    for(n=0; n<4; n++) {
        for(u=0; u<2; u++) {
            normplasma = PLASMA_dlansy(norm[n], uplo[u], min(M,N), A, LDA);
            normlapack = LAPACKE_dlansy_work(LAPACK_COL_MAJOR, lapack_const(norm[n]), lapack_const(uplo[u]), min(M,N), A, LDA, work);
            printf("Lapack %e, Plasma %e\n", normlapack, normplasma);

            result = fabs(normplasma - normlapack) / (normlapack * eps);
            switch(norm[n]) {
            case PlasmaMaxNorm:
                /* result should be perfectly equal */
                break;
            case PlasmaInfNorm:
                /* Sum order on the line can differ */
                result = result / (double)N;
                break;
            case PlasmaOneNorm:
                /* Sum order on the column can differ */
                result = result / (double)M;
                break;
            case PlasmaFrobeniusNorm:
                /* Sum order on every element can differ */
                result = result / ((double)M * (double)N);
                break;
            }

            printf("***************************************************\n");
            if ( result < 1. ) {
                printf(" ---- TESTING DLANSY (%s, %s)......... PASSED !\n", normstr[n], uplostr[u]);
            }
            else {
                printf(" - TESTING DLANSY (%s, %s)... FAILED !\n", normstr[n], uplostr[u]);
            }
            printf("***************************************************\n");
        }
    }
#endif

    free(A);
    free(work);

    return 0;
}
GB_msort_1.c
//------------------------------------------------------------------------------
// GB_msort_1: sort a 1-by-n list of integers
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// A parallel mergesort of an array of 1-by-n integers.

#include "GB_msort_1.h"

//------------------------------------------------------------------------------
// GB_msort_1_binary_search: binary search for the pivot
//------------------------------------------------------------------------------

// The Pivot value is Y [pivot], and a binary search for the Pivot is made in
// the array X [p_pstart...p_end-1], which is sorted in non-decreasing order on
// input.  The return value is pleft, where
//
//    X [p_start ... pleft-1] <= Pivot and
//    X [pleft ... p_end-1] >= Pivot holds.
//
// pleft is returned in the range p_start to p_end.  If pleft is p_start, then
// the Pivot is smaller than all entries in X [p_start...p_end-1], and the left
// list X [p_start...pleft-1] is empty.  If pleft is p_end, then the Pivot is
// larger than all entries in X [p_start...p_end-1], and the right list X
// [pleft...p_end-1] is empty.

// Comparisons use the GB_lt_1 / GB_eq_1 macros (from GB_msort_1.h).

static int64_t GB_msort_1_binary_search    // return pleft
(
    const int64_t *restrict Y_0,           // Pivot is Y [pivot]
    const int64_t pivot,
    const int64_t *restrict X_0,           // search in X [p_start..p_end_-1]
    const int64_t p_start,
    const int64_t p_end
)
{

    //--------------------------------------------------------------------------
    // find where the Pivot appears in X
    //--------------------------------------------------------------------------

    // binary search of X [p_start...p_end-1] for the Pivot
    int64_t pleft = p_start ;
    int64_t pright = p_end - 1 ;
    while (pleft < pright)
    {
        int64_t pmiddle = (pleft + pright) >> 1 ;
        // less = (X [pmiddle] < Pivot)
        bool less = GB_lt_1 (X_0, pmiddle, Y_0, pivot) ;
        pleft  = less ? (pmiddle+1) : pleft ;
        pright = less ? pright : pmiddle ;
    }

    // binary search is narrowed down to a single item
    // or it has found the list is empty:
    ASSERT (pleft == pright || pleft == pright + 1) ;

    // If found is true then X [pleft == pright] == Pivot.  If duplicates
    // appear then X [pleft] is any one of the entries equal to the Pivot
    // in the list.  If found is false then
    //    X [p_start ... pleft-1] < Pivot and
    //    X [pleft+1 ... p_end-1] > Pivot holds.
    //    The value X [pleft] may be either < or > Pivot.
    bool found = (pleft == pright) && GB_eq_1 (X_0, pleft, Y_0, pivot) ;

    // Modify pleft and pright:
    if (!found && (pleft == pright))
    {
        if (GB_lt_1 (X_0, pleft, Y_0, pivot))
        {
            pleft++ ;
        }
        else
        {
            // pright++ ;  // (not needed)
        }
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    // If found is false then
    //    X [p_start ... pleft-1] < Pivot and
    //    X [pleft ... p_end-1] > Pivot holds,
    //    and pleft-1 == pright

    // If X has no duplicates, then whether or not Pivot is found,
    //    X [p_start ... pleft-1] < Pivot and
    //    X [pleft ... p_end-1] >= Pivot holds.

    // If X has duplicates, then whether or not Pivot is found,
    //    X [p_start ... pleft-1] <= Pivot and
    //    X [pleft ... p_end-1] >= Pivot holds.

    return (pleft) ;
}

//------------------------------------------------------------------------------
// GB_msort_1_create_merge_tasks
//------------------------------------------------------------------------------

// Recursively constructs ntasks tasks to merge two arrays, Left and Right,
// into Sresult, where Left is L [pL_start...pL_end-1], Right is R
// [pR_start...pR_end-1], and Sresult is S [pS_start...pS_start+total_work-1],
// and where total_work is the total size of Left and Right.
//
// Task tid will merge L [L_task [tid] ... L_task [tid] + L_len [tid] - 1] and
// R [R_task [tid] ... R_task [tid] + R_len [tid] -1] into the merged output
// array S [S_task [tid] ... ].  The task tids created are t0 to
// t0+ntasks-1.

void GB_msort_1_create_merge_tasks
(
    // output:
    int64_t *restrict L_task,        // L_task [t0...t0+ntasks-1] computed
    int64_t *restrict L_len,         // L_len [t0...t0+ntasks-1] computed
    int64_t *restrict R_task,        // R_task [t0...t0+ntasks-1] computed
    int64_t *restrict R_len,         // R_len [t0...t0+ntasks-1] computed
    int64_t *restrict S_task,        // S_task [t0...t0+ntasks-1] computed
    // input:
    const int t0,                    // first task tid to create
    const int ntasks,                // # of tasks to create
    const int64_t pS_start,          // merge into S [pS_start...]
const int64_t *restrict L_0, // Left = L [pL_start...pL_end-1] const int64_t pL_start, const int64_t pL_end, const int64_t *restrict R_0, // Right = R [pR_start...pR_end-1] const int64_t pR_start, const int64_t pR_end ) { //-------------------------------------------------------------------------- // get problem size //-------------------------------------------------------------------------- int64_t nleft = pL_end - pL_start ; // size of Left array int64_t nright = pR_end - pR_start ; // size of Right array int64_t total_work = nleft + nright ; // total work to do ASSERT (ntasks >= 1) ; ASSERT (total_work > 0) ; //-------------------------------------------------------------------------- // create the tasks //-------------------------------------------------------------------------- if (ntasks == 1) { //---------------------------------------------------------------------- // a single task will merge all of Left and Right into Sresult //---------------------------------------------------------------------- L_task [t0] = pL_start ; L_len [t0] = nleft ; R_task [t0] = pR_start ; R_len [t0] = nright ; S_task [t0] = pS_start ; } else { //---------------------------------------------------------------------- // partition the Left and Right arrays for multiple merge tasks //---------------------------------------------------------------------- int64_t pleft, pright ; if (nleft >= nright) { // split Left in half, and search for its pivot in Right pleft = (pL_end + pL_start) >> 1 ; pright = GB_msort_1_binary_search ( L_0, pleft, R_0, pR_start, pR_end) ; } else { // split Right in half, and search for its pivot in Left pright = (pR_end + pR_start) >> 1 ; pleft = GB_msort_1_binary_search ( R_0, pright, L_0, pL_start, pL_end) ; } //---------------------------------------------------------------------- // partition the tasks according to the work of each partition //---------------------------------------------------------------------- // work0 is the total work in the first 
partition int64_t work0 = (pleft - pL_start) + (pright - pR_start) ; int ntasks0 = (int) round ((double) ntasks * (((double) work0) / ((double) total_work))) ; // ensure at least one task is assigned to each partition ntasks0 = GB_IMAX (ntasks0, 1) ; ntasks0 = GB_IMIN (ntasks0, ntasks-1) ; int ntasks1 = ntasks - ntasks0 ; //---------------------------------------------------------------------- // assign ntasks0 to the first half //---------------------------------------------------------------------- // ntasks0 tasks merge L [pL_start...pleft-1] and R [pR_start..pright-1] // into the result S [pS_start...work0-1]. GB_msort_1_create_merge_tasks ( L_task, L_len, R_task, R_len, S_task, t0, ntasks0, pS_start, L_0, pL_start, pleft, R_0, pR_start, pright) ; //---------------------------------------------------------------------- // assign ntasks1 to the second half //---------------------------------------------------------------------- // ntasks1 tasks merge L [pleft...pL_end-1] and R [pright...pR_end-1] // into the result S [pS_start+work0...pS_start+total_work]. 
int t1 = t0 + ntasks0 ; // first task id of the second set of tasks int64_t pS_start1 = pS_start + work0 ; // 2nd set starts here in S GB_msort_1_create_merge_tasks ( L_task, L_len, R_task, R_len, S_task, t1, ntasks1, pS_start1, L_0, pleft, pL_end, R_0, pright, pR_end) ; } } //------------------------------------------------------------------------------ // GB_msort_1_merge: merge two sorted lists via a single thread //------------------------------------------------------------------------------ // merge Left [0..nleft-1] and Right [0..nright-1] into S [0..nleft+nright-1] */ static void GB_msort_1_merge ( int64_t *restrict S_0, // output of length nleft + nright const int64_t *restrict Left_0, // left input of length nleft const int64_t nleft, const int64_t *restrict Right_0, // right input of length nright const int64_t nright ) { int64_t p, pleft, pright ; // merge the two inputs, Left and Right, while both inputs exist for (p = 0, pleft = 0, pright = 0 ; pleft < nleft && pright < nright ; p++) { if (GB_lt_1 (Left_0, pleft, Right_0, pright)) { // S [p] = Left [pleft++] S_0 [p] = Left_0 [pleft] ; pleft++ ; } else { // S [p] = Right [pright++] S_0 [p] = Right_0 [pright] ; pright++ ; } } // either input is exhausted; copy the remaining list into S if (pleft < nleft) { int64_t nremaining = (nleft - pleft) ; memcpy (S_0 + p, Left_0 + pleft, nremaining * sizeof (int64_t)) ; } else if (pright < nright) { int64_t nremaining = (nright - pright) ; memcpy (S_0 + p, Right_0 + pright, nremaining * sizeof (int64_t)) ; } } //------------------------------------------------------------------------------ // GB_msort_1: parallel mergesort //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info GB_msort_1 // sort array A of size 1-by-n ( int64_t *restrict A_0, // size n array const int64_t n, int nthreads // # of threads to use ) { //-------------------------------------------------------------------------- // handle small problems with 
a single thread //-------------------------------------------------------------------------- if (nthreads <= 1 || n <= GB_BASECASE) { // sequential quicksort GB_qsort_1 (A_0, n) ; return (GrB_SUCCESS) ; } //-------------------------------------------------------------------------- // determine # of tasks //-------------------------------------------------------------------------- // determine the number of levels to create, which must always be an // even number. The # of levels is chosen to ensure that the # of leaves // of the task tree is between 4*nthreads and 16*nthreads. // 2 to 4 threads: 4 levels, 16 qsort leaves // 5 to 16 threads: 6 levels, 64 qsort leaves // 17 to 64 threads: 8 levels, 256 qsort leaves // 65 to 256 threads: 10 levels, 1024 qsort leaves // 256 to 1024 threads: 12 levels, 4096 qsort leaves // ... int k = (int) (2 + 2 * ceil (log2 ((double) nthreads) / 2)) ; int ntasks = 1 << k ; //-------------------------------------------------------------------------- // allocate workspace //-------------------------------------------------------------------------- int64_t *restrict W = NULL ; size_t W_size = 0 ; W = GB_MALLOC_WERK (n + 6*ntasks + 1, int64_t, &W_size) ; if (W == NULL) { // out of memory return (GrB_OUT_OF_MEMORY) ; } int64_t *T = W ; int64_t *restrict W_0 = T ; T += n ; int64_t *restrict L_task = T ; T += ntasks ; int64_t *restrict L_len = T ; T += ntasks ; int64_t *restrict R_task = T ; T += ntasks ; int64_t *restrict R_len = T ; T += ntasks ; int64_t *restrict S_task = T ; T += ntasks ; int64_t *restrict Slice = T ; T += (ntasks+1) ; //-------------------------------------------------------------------------- // partition and sort the leaves //-------------------------------------------------------------------------- GB_eslice (Slice, n, ntasks) ; int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { int64_t leaf = Slice [tid] ; int64_t leafsize = Slice [tid+1] - leaf ; 
GB_qsort_1 (A_0 + leaf, leafsize) ; } //-------------------------------------------------------------------------- // merge each level //-------------------------------------------------------------------------- int nt = 1 ; for ( ; k >= 2 ; k -= 2) { //---------------------------------------------------------------------- // merge level k into level k-1, from A into W //---------------------------------------------------------------------- // TODO: skip k and k-1 for each group of 4 sublists of A if they are // already sorted with respect to each other. // this could be done in parallel if ntasks was large for (int tid = 0 ; tid < ntasks ; tid += 2*nt) { // create 2*nt tasks to merge two A sublists into one W sublist GB_msort_1_create_merge_tasks ( L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid], A_0, Slice [tid], Slice [tid+nt], A_0, Slice [tid+nt], Slice [tid+2*nt]) ; } #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { // merge A [pL...pL+nL-1] and A [pR...pR+nR-1] into W [pS..] int64_t pL = L_task [tid], nL = L_len [tid] ; int64_t pR = R_task [tid], nR = R_len [tid] ; int64_t pS = S_task [tid] ; GB_msort_1_merge ( W_0 + pS, A_0 + pL, nL, A_0 + pR, nR) ; } nt = 2*nt ; //---------------------------------------------------------------------- // merge level k-1 into level k-2, from W into A //---------------------------------------------------------------------- // this could be done in parallel if ntasks was large for (int tid = 0 ; tid < ntasks ; tid += 2*nt) { // create 2*nt tasks to merge two W sublists into one A sublist GB_msort_1_create_merge_tasks ( L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid], W_0, Slice [tid], Slice [tid+nt], W_0, Slice [tid+nt], Slice [tid+2*nt]) ; } #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { // merge A [pL...pL+nL-1] and A [pR...pR+nR-1] into W [pS..] 
int64_t pL = L_task [tid], nL = L_len [tid] ; int64_t pR = R_task [tid], nR = R_len [tid] ; int64_t pS = S_task [tid] ; GB_msort_1_merge ( A_0 + pS, W_0 + pL, nL, W_0 + pR, nR) ; } nt = 2*nt ; } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- GB_FREE_WERK (&W, W_size) ; return (GrB_SUCCESS) ; }
user_defined_move_generator.h
/*****************************************************************************/
// Copyright (c) 2020-2021 Yuji KOGUMA
// Released under the MIT license
// https://opensource.org/licenses/mit-license.php
/*****************************************************************************/
#ifndef PRINTEMPS_NEIGHBORHOOD_USER_DEFINED_MOVE_GENERATOR_H__
#define PRINTEMPS_NEIGHBORHOOD_USER_DEFINED_MOVE_GENERATOR_H__

#include "abstract_move_generator.h"

namespace printemps {
namespace neighborhood {
/*****************************************************************************/
// Move generator whose candidate moves come from a user-supplied callback.
// setup() wraps that callback with the standard filtering pass that decides
// which candidate moves survive (flag 1 = keep, flag 0 = discard).
template <class T_Variable, class T_Expression>
class UserDefinedMoveGenerator
    : public AbstractMoveGenerator<T_Variable, T_Expression> {
   private:
    // User-supplied callback that fills the given vector with candidate
    // moves; defaults to a no-op (see initialize()).
    std::function<void(std::vector<Move<T_Variable, T_Expression>> *)>
        m_move_updater_wrapper;

   public:
    /*************************************************************************/
    UserDefinedMoveGenerator(void) {
        this->initialize();
    }

    /*************************************************************************/
    virtual ~UserDefinedMoveGenerator(void) {
        /// nothing to do
    }

    /*************************************************************************/
    // Resets the user callback to a no-op lambda.
    // NOTE(review): constexpr on a member function that assigns a
    // std::function member can never be constant-evaluated — presumably kept
    // for stylistic consistency with the rest of the code base; confirm.
    inline constexpr void initialize(void) {
        this->m_move_updater_wrapper =
            [](std::vector<Move<T_Variable, T_Expression>> *) {};
    }

    /*************************************************************************/
    // Registers the user-defined move generator callback. The callback is
    // copied; it must append/overwrite moves in the vector it is given.
    inline constexpr void set_move_updater(
        const std::function<void(std::vector<Move<T_Variable, T_Expression>> *)>
            &a_MOVE_UPDATER) {
        this->m_move_updater_wrapper = a_MOVE_UPDATER;
    }

    /*************************************************************************/
    // Builds this->m_move_updater: a lambda that (1) runs the user callback
    // to produce candidate moves, then (2) flags each move for acceptance.
    void setup(void) {
        auto move_updater =                                             //
            [this](auto *a_moves_ptr,                                   //
                   auto *a_flags,                                       //
                   const bool a_ACCEPT_ALL,                             //
                   const bool a_ACCEPT_OBJECTIVE_IMPROVABLE,            //
                   const bool a_ACCEPT_FEASIBILITY_IMPROVABLE,          //
                   [[maybe_unused]] const bool a_IS_ENABLED_PARALLEL) {
                // Let the user callback populate the candidate move list.
                m_move_updater_wrapper(a_moves_ptr);

                // NOTE(review): size() (size_t) is narrowed to int here;
                // assumes the move list fits in an int — confirm.
                const int MOVES_SIZE = a_moves_ptr->size();
                a_flags->resize(MOVES_SIZE);

#ifdef _OPENMP
#pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static)
#endif
                for (auto i = 0; i < MOVES_SIZE; i++) {
                    (*a_moves_ptr)[i].sense = MoveSense::UserDefined;
                    (*a_flags)[i] = 1;

                    // Hard filters: a move touching a fixed or selection
                    // variable, or violating a bound, is always rejected.
                    if (neighborhood::has_fixed_variable((*a_moves_ptr)[i])) {
                        (*a_flags)[i] = 0;
                        continue;
                    }
                    if (neighborhood::has_selection_variable(
                            (*a_moves_ptr)[i])) {
                        (*a_flags)[i] = 0;
                        continue;
                    }
                    if (neighborhood::has_bound_violation((*a_moves_ptr)[i])) {
                        (*a_flags)[i] = 0;
                        continue;
                    }

                    // Soft filters: unless every move is accepted outright,
                    // keep only moves that can improve the objective or
                    // feasibility (whichever kinds the caller enabled).
                    if (a_ACCEPT_ALL) {
                        /** nothing to do */
                    } else {
                        if (a_ACCEPT_OBJECTIVE_IMPROVABLE &&
                            neighborhood::has_objective_improvable_variable(
                                (*a_moves_ptr)[i])) {
                            continue;
                        }

                        if (a_ACCEPT_FEASIBILITY_IMPROVABLE &&
                            neighborhood::has_feasibility_improvable_variable(
                                (*a_moves_ptr)[i])) {
                            continue;
                        }

                        (*a_flags)[i] = 0;
                    }
                }
            };
        this->m_move_updater = move_updater;
    }
};
}  // namespace neighborhood
}  // namespace printemps
#endif
/*****************************************************************************/
// END
/*****************************************************************************/
network_simplex_simple.h
/* -*- mode: C++; indent-tabs-mode: nil; -*- * * * This file has been adapted by Nicolas Bonneel (2013), * from network_simplex.h from LEMON, a generic C++ optimization library, * to implement a lightweight network simplex for mass transport, more * memory efficient than the original file. A previous version of this file * is used as part of the Displacement Interpolation project, * Web: http://www.cs.ubc.ca/labs/imager/tr/2011/DisplacementInterpolation/ * * Revisions: * March 2015: added OpenMP parallelization * March 2017: included Antoine Rolet's trick to make it more robust * April 2018: IMPORTANT bug fix + uses 64bit integers (slightly slower but less risks of overflows), updated to a newer version of the algo by LEMON, sparse flow by default + minor edits. * * **** Original file Copyright Notice : * * Copyright (C) 2003-2010 * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport * (Egervary Research Group on Combinatorial Optimization, EGRES). * * Permission to use, modify and distribute this software is granted * provided that this copyright notice appears in all copies. For * precise terms see the accompanying LICENSE file. * * This software is provided "AS IS" with no warranty of any kind, * express or implied, and with no claim as to its suitability for any * purpose. * */ #ifndef LEMON_NETWORK_SIMPLEX_SIMPLE_H #define LEMON_NETWORK_SIMPLEX_SIMPLE_H /// \ingroup min_cost_flow_algs /// /// \file /// \brief Network Simplex algorithm for finding a minimum cost flow. // if your compiler has troubles with unorderedmaps, just comment the following line to use a slower std::map instead #define HASHMAP // now handled with unorderedmaps instead of stdext::hash_map. Should be better supported. 
#define SPARSE_FLOW // a sparse flow vector will be 10-15% slower for small problems but uses less memory and becomes faster for large problems (40k total nodes) #include <vector> #include <limits> #include <algorithm> #ifdef HASHMAP #include <unordered_map> #else #include <map> #endif //#include "core.h" //#include "lmath.h" #include <omp.h> #include <cmath> //#include "sparse_array_n.h" #include "full_bipartitegraph.h" #define INVALIDNODE -1 #define INVALID (-1) namespace lemon { template <typename T> class ProxyObject; template<typename T> class SparseValueVector { public: SparseValueVector(size_t n = 0) // parameter n for compatibility with standard vectors { } void resize(size_t n = 0) {}; T operator[](const size_t id) const { #ifdef HASHMAP typename std::unordered_map<size_t, T>::const_iterator it = data.find(id); #else typename std::map<size_t, T>::const_iterator it = data.find(id); #endif if (it == data.end()) return 0; else return it->second; } ProxyObject<T> operator[](const size_t id) { return ProxyObject<T>(this, id); } //private: #ifdef HASHMAP std::unordered_map<size_t, T> data; #else std::map<size_t, T> data; #endif }; template <typename T> class ProxyObject { public: ProxyObject(SparseValueVector<T> *v, size_t idx) { _v = v; _idx = idx; }; ProxyObject<T> & operator=(const T &v) { // If we get here, we know that operator[] was called to perform a write access, // so we can insert an item in the vector if needed if (v != 0) _v->data[_idx] = v; return *this; } operator T() { // If we get here, we know that operator[] was called to perform a read access, // so we can simply return the existing object #ifdef HASHMAP typename std::unordered_map<size_t, T>::iterator it = _v->data.find(_idx); #else typename std::map<size_t, T>::iterator it = _v->data.find(_idx); #endif if (it == _v->data.end()) return 0; else return it->second; } void operator+=(T val) { if (val == 0) return; #ifdef HASHMAP typename std::unordered_map<size_t, T>::iterator it = 
_v->data.find(_idx); #else typename std::map<size_t, T>::iterator it = _v->data.find(_idx); #endif if (it == _v->data.end()) _v->data[_idx] = val; else { T sum = it->second + val; if (sum == 0) _v->data.erase(it); else it->second = sum; } } void operator-=(T val) { if (val == 0) return; #ifdef HASHMAP typename std::unordered_map<size_t, T>::iterator it = _v->data.find(_idx); #else typename std::map<size_t, T>::iterator it = _v->data.find(_idx); #endif if (it == _v->data.end()) _v->data[_idx] = -val; else { T sum = it->second - val; if (sum == 0) _v->data.erase(it); else it->second = sum; } } SparseValueVector<T> *_v; size_t _idx; }; /// \addtogroup min_cost_flow_algs /// @{ /// \brief Implementation of the primal Network Simplex algorithm /// for finding a \ref min_cost_flow "minimum cost flow". /// /// \ref NetworkSimplexSimple implements the primal Network Simplex algorithm /// for finding a \ref min_cost_flow "minimum cost flow" /// \ref amo93networkflows, \ref dantzig63linearprog, /// \ref kellyoneill91netsimplex. /// This algorithm is a highly efficient specialized version of the /// linear programming simplex method directly for the minimum cost /// flow problem. /// /// In general, %NetworkSimplexSimple is the fastest implementation available /// in LEMON for this problem. /// Moreover, it supports both directions of the supply/demand inequality /// constraints. For more information, see \ref SupplyType. /// /// Most of the parameters of the problem (except for the digraph) /// can be given using separate functions, and the algorithm can be /// executed using the \ref run() function. If some parameters are not /// specified, then default values will be used. /// /// \tparam GR The digraph type the algorithm runs on. /// \tparam V The number type used for flow amounts, capacity bounds /// and supply values in the algorithm. By default, it is \c int. /// \tparam C The number type used for costs and potentials in the /// algorithm. 
By default, it is the same as \c V. /// /// \warning Both number types must be signed and all input data must /// be integer. /// /// \note %NetworkSimplexSimple provides five different pivot rule /// implementations, from which the most efficient one is used /// by default. For more information, see \ref PivotRule. template <typename GR, typename V = int, typename C = V, typename ArcsType = int64_t> class NetworkSimplexSimple { public: /// \brief Constructor. /// /// The constructor of the class. /// /// \param graph The digraph the algorithm runs on. /// \param arc_mixing Indicate if the arcs have to be stored in a /// mixed order in the internal data structure. /// In special cases, it could lead to better overall performance, /// but it is usually slower. Therefore it is disabled by default. NetworkSimplexSimple(const GR& graph, bool arc_mixing, int nbnodes, ArcsType nb_arcs, size_t maxiters = 0) : _graph(graph), //_arc_id(graph), _arc_mixing(arc_mixing), _init_nb_nodes(nbnodes), _init_nb_arcs(nb_arcs), MAX(std::numeric_limits<Value>::max()), INF(std::numeric_limits<Value>::has_infinity ? std::numeric_limits<Value>::infinity() : MAX) { // Reset data structures reset(); max_iter = maxiters; } /// The type of the flow amounts, capacity bounds and supply values typedef V Value; /// The type of the arc costs typedef C Cost; public: /// \brief Problem type constants for the \c run() function. /// /// Enum type containing the problem type constants that can be /// returned by the \ref run() function of the algorithm. enum ProblemType { /// The problem has no feasible solution (flow). INFEASIBLE, /// The problem has optimal solution (i.e. it is feasible and /// bounded), and the algorithm has found optimal flow and node /// potentials (primal and dual solutions). OPTIMAL, /// The objective function of the problem is unbounded, i.e. /// there is a directed cycle having negative total cost and /// infinite upper bound. 
UNBOUNDED }; /// \brief Constants for selecting the type of the supply constraints. /// /// Enum type containing constants for selecting the supply type, /// i.e. the direction of the inequalities in the supply/demand /// constraints of the \ref min_cost_flow "minimum cost flow problem". /// /// The default supply type is \c GEQ, the \c LEQ type can be /// selected using \ref supplyType(). /// The equality form is a special case of both supply types. enum SupplyType { /// This option means that there are <em>"greater or equal"</em> /// supply/demand constraints in the definition of the problem. GEQ, /// This option means that there are <em>"less or equal"</em> /// supply/demand constraints in the definition of the problem. LEQ }; private: size_t max_iter; TEMPLATE_DIGRAPH_TYPEDEFS(GR); typedef std::vector<int> IntVector; typedef std::vector<ArcsType> ArcVector; typedef std::vector<Value> ValueVector; typedef std::vector<Cost> CostVector; // typedef SparseValueVector<Cost> CostVector; typedef std::vector<char> BoolVector; // Note: vector<char> is used instead of vector<bool> for efficiency reasons // State constants for arcs enum ArcState { STATE_UPPER = -1, STATE_TREE = 0, STATE_LOWER = 1 }; typedef std::vector<signed char> StateVector; // Note: vector<signed char> is used instead of vector<ArcState> for // efficiency reasons private: // Data related to the underlying digraph const GR &_graph; int _node_num; ArcsType _arc_num; ArcsType _all_arc_num; ArcsType _search_arc_num; // Parameters of the problem SupplyType _stype; Value _sum_supply; inline int _node_id(int n) const { return _node_num - n - 1; }; //IntArcMap _arc_id; IntVector _source; // keep nodes as integers IntVector _target; bool _arc_mixing; // Node and arc data CostVector _cost; ValueVector _supply; #ifdef SPARSE_FLOW SparseValueVector<Value> _flow; #else ValueVector _flow; #endif CostVector _pi; // Data for storing the spanning tree structure IntVector _parent; ArcVector _pred; IntVector _thread; 
IntVector _rev_thread; IntVector _succ_num; IntVector _last_succ; IntVector _dirty_revs; BoolVector _forward; StateVector _state; ArcsType _root; // Temporary data used in the current pivot iteration ArcsType in_arc, join, u_in, v_in, u_out, v_out; ArcsType first, second, right, last; ArcsType stem, par_stem, new_stem; Value delta; const Value MAX; ArcsType mixingCoeff; public: /// \brief Constant for infinite upper bounds (capacities). /// /// Constant for infinite upper bounds (capacities). /// It is \c std::numeric_limits<Value>::infinity() if available, /// \c std::numeric_limits<Value>::max() otherwise. const Value INF; private: // thank you to DVK and MizardX from StackOverflow for this function! inline ArcsType sequence(ArcsType k) const { ArcsType smallv = (k > num_total_big_subsequence_numbers) & 1; k -= num_total_big_subsequence_numbers * smallv; ArcsType subsequence_length2 = subsequence_length - smallv; ArcsType subsequence_num = (k / subsequence_length2) + num_big_subsequences * smallv; ArcsType subsequence_offset = (k % subsequence_length2) * mixingCoeff; return subsequence_offset + subsequence_num; } ArcsType subsequence_length; ArcsType num_big_subsequences; ArcsType num_total_big_subsequence_numbers; inline ArcsType getArcID(const Arc &arc) const { //int n = _arc_num-arc._id-1; ArcsType n = _arc_num - GR::id(arc) - 1; //ArcsType a = mixingCoeff*(n%mixingCoeff) + n/mixingCoeff; //ArcsType b = _arc_id[arc]; if (_arc_mixing) return sequence(n); else return n; } // finally unused because too slow inline ArcsType getSource(const ArcsType arc) const { //ArcsType a = _source[arc]; //return a; ArcsType n = _arc_num - arc - 1; if (_arc_mixing) n = mixingCoeff*(n%mixingCoeff) + n / mixingCoeff; ArcsType b; if (n >= 0) b = _node_id(_graph.source(GR::arcFromId(n))); else { n = arc + 1 - _arc_num; if (n <= _node_num) b = _node_num; else if (n >= _graph._n1) b = _graph._n1; else b = _graph._n1 - n; } return b; } // Implementation of the Block Search pivot rule 
class BlockSearchPivotRule { private: // References to the NetworkSimplexSimple class const IntVector &_source; const IntVector &_target; const CostVector &_cost; const StateVector &_state; const CostVector &_pi; ArcsType &_in_arc; ArcsType _search_arc_num; // Pivot rule data ArcsType _block_size; ArcsType _next_arc; NetworkSimplexSimple &_ns; public: // Constructor BlockSearchPivotRule(NetworkSimplexSimple &ns) : _source(ns._source), _target(ns._target), _cost(ns._cost), _state(ns._state), _pi(ns._pi), _in_arc(ns.in_arc), _search_arc_num(ns._search_arc_num), _next_arc(0), _ns(ns) { // The main parameters of the pivot rule const double BLOCK_SIZE_FACTOR = 1; const ArcsType MIN_BLOCK_SIZE = 10; _block_size = std::max(ArcsType(BLOCK_SIZE_FACTOR * std::sqrt(double(_search_arc_num))), MIN_BLOCK_SIZE); } // Find next entering arc bool findEnteringArc() { Cost min_val = 0; ArcsType N = omp_get_max_threads(); std::vector<Cost> minArray(N, 0); std::vector<ArcsType> arcId(N); ArcsType bs = (ArcsType)ceil(_block_size / (double)N); for (ArcsType i = 0; i < _search_arc_num; i += _block_size) { ArcsType e; ArcsType j; #pragma omp parallel { int t = omp_get_thread_num(); #pragma omp for schedule(static, bs) lastprivate(e) for (j = 0; j < std::min(i + _block_size, _search_arc_num) - i; j++) { e = (_next_arc + i + j); if (e >= _search_arc_num) e -= _search_arc_num; Cost c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); if (c < minArray[t]) { minArray[t] = c; arcId[t] = e; } } } for (int j = 0; j < N; j++) { if (minArray[j] < min_val) { min_val = minArray[j]; _in_arc = arcId[j]; } } Cost a = std::abs(_pi[_source[_in_arc]]) > std::abs(_pi[_target[_in_arc]]) ? std::abs(_pi[_source[_in_arc]]) : std::abs(_pi[_target[_in_arc]]); a = a > std::abs(_cost[_in_arc]) ? a : std::abs(_cost[_in_arc]); if (min_val < -std::numeric_limits<Cost>::epsilon()*a) { _next_arc = e; return true; } } Cost a = fabs(_pi[_source[_in_arc]]) > fabs(_pi[_target[_in_arc]]) ? 
fabs(_pi[_source[_in_arc]]) : fabs(_pi[_target[_in_arc]]); a = a > fabs(_cost[_in_arc]) ? a : fabs(_cost[_in_arc]); if (min_val >= -std::numeric_limits<Cost>::epsilon()*a) return false; return true; } // Find next entering arc /*bool findEnteringArc() { Cost min_val = 0; int N = omp_get_max_threads(); std::vector<Cost> minArray(N); std::vector<ArcsType> arcId(N); ArcsType bs = (ArcsType)ceil(_block_size / (double)N); for (ArcsType i = 0; i < _search_arc_num; i += _block_size) { ArcsType maxJ = std::min(i + _block_size, _search_arc_num) - i; ArcsType j; #pragma omp parallel { int t = omp_get_thread_num(); Cost minV = 0; ArcsType arcStart = _next_arc + i; ArcsType arc = -1; #pragma omp for schedule(static, bs) for (j = 0; j < maxJ; j++) { ArcsType e = arcStart + j; if (e >= _search_arc_num) e -= _search_arc_num; Cost c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); if (c < minV) { minV = c; arc = e; } } minArray[t] = minV; arcId[t] = arc; } for (int j = 0; j < N; j++) { if (minArray[j] < min_val) { min_val = minArray[j]; _in_arc = arcId[j]; } } //FIX by Antoine Rolet to avoid precision issues Cost a = std::max(std::abs(_cost[_in_arc]), std::max(std::abs(_pi[_source[_in_arc]]), std::abs(_pi[_target[_in_arc]]))); if (min_val <-std::numeric_limits<Cost>::epsilon()*a) { _next_arc = _next_arc + i + maxJ - 1; if (_next_arc >= _search_arc_num) _next_arc -= _search_arc_num; return true; } } if (min_val >= 0) { return false; } return true; }*/ /*bool findEnteringArc() { Cost c, min = 0; int cnt = _block_size; int e, min_arc = _next_arc; for (e = _next_arc; e < _search_arc_num; ++e) { c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); if (c < min) { min = c; min_arc = e; } if (--cnt == 0) { if (min < 0) break; cnt = _block_size; } } if (min == 0 || cnt > 0) { for (e = 0; e < _next_arc; ++e) { c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); if (c < min) { min = c; min_arc = e; } if (--cnt == 0) { if (min < 0) break; cnt = _block_size; 
} } } if (min >= 0) return false; _in_arc = min_arc; _next_arc = e; return true; }*/ }; //class BlockSearchPivotRule public: int _init_nb_nodes; ArcsType _init_nb_arcs; /// \name Parameters /// The parameters of the algorithm can be specified using these /// functions. /// @{ /// \brief Set the costs of the arcs. /// /// This function sets the costs of the arcs. /// If it is not used before calling \ref run(), the costs /// will be set to \c 1 on all arcs. /// /// \param map An arc map storing the costs. /// Its \c Value type must be convertible to the \c Cost type /// of the algorithm. /// /// \return <tt>(*this)</tt> template<typename CostMap> NetworkSimplexSimple& costMap(const CostMap& map) { Arc a; _graph.first(a); for (; a != INVALID; _graph.next(a)) { _cost[getArcID(a)] = map[a]; } return *this; } /// \brief Set the costs of one arc. /// /// This function sets the costs of one arcs. /// Done for memory reasons /// /// \param arc An arc. /// \param arc A cost /// /// \return <tt>(*this)</tt> template<typename Value> NetworkSimplexSimple& setCost(const Arc& arc, const Value cost) { _cost[getArcID(arc)] = cost; return *this; } /// \brief Set the supply values of the nodes. /// /// This function sets the supply values of the nodes. /// If neither this function nor \ref stSupply() is used before /// calling \ref run(), the supply of each node will be set to zero. /// /// \param map A node map storing the supply values. /// Its \c Value type must be convertible to the \c Value type /// of the algorithm. 
/// /// \return <tt>(*this)</tt> template<typename SupplyMap> NetworkSimplexSimple& supplyMap(const SupplyMap& map) { Node n; _graph.first(n); for (; n != INVALIDNODE; _graph.next(n)) { _supply[_node_id(n)] = map[n]; } return *this; } template<typename SupplyMap> NetworkSimplexSimple& supplyMap(const SupplyMap* map1, int n1, const SupplyMap* map2, int n2) { Node n; _graph.first(n); for (; n != INVALIDNODE; _graph.next(n)) { if (n<n1) _supply[_node_id(n)] = map1[n]; else _supply[_node_id(n)] = map2[n - n1]; } return *this; } template<typename SupplyMap> NetworkSimplexSimple& supplyMapAll(SupplyMap val1, int n1, SupplyMap val2, int n2) { Node n; _graph.first(n); for (; n != INVALIDNODE; _graph.next(n)) { if (n<n1) _supply[_node_id(n)] = val1; else _supply[_node_id(n)] = val2; } return *this; } /// \brief Set single source and target nodes and a supply value. /// /// This function sets a single source node and a single target node /// and the required flow value. /// If neither this function nor \ref supplyMap() is used before /// calling \ref run(), the supply of each node will be set to zero. /// /// Using this function has the same effect as using \ref supplyMap() /// with such a map in which \c k is assigned to \c s, \c -k is /// assigned to \c t and all other nodes have zero supply value. /// /// \param s The source node. /// \param t The target node. /// \param k The required amount of flow from node \c s to node \c t /// (i.e. the supply of \c s and the demand of \c t). /// /// \return <tt>(*this)</tt> NetworkSimplexSimple& stSupply(const Node& s, const Node& t, Value k) { for (int i = 0; i != _node_num; ++i) { _supply[i] = 0; } _supply[_node_id(s)] = k; _supply[_node_id(t)] = -k; return *this; } /// \brief Set the type of the supply constraints. /// /// This function sets the type of the supply/demand constraints. /// If it is not used before calling \ref run(), the \ref GEQ supply /// type will be used. /// /// For more information, see \ref SupplyType. 
/// /// \return <tt>(*this)</tt> NetworkSimplexSimple& supplyType(SupplyType supply_type) { _stype = supply_type; return *this; } /// @} /// \name Execution Control /// The algorithm can be executed using \ref run(). /// @{ /// \brief Run the algorithm. /// /// This function runs the algorithm. /// The paramters can be specified using functions \ref lowerMap(), /// \ref upperMap(), \ref costMap(), \ref supplyMap(), \ref stSupply(), /// \ref supplyType(). /// For example, /// \code /// NetworkSimplexSimple<ListDigraph> ns(graph); /// ns.lowerMap(lower).upperMap(upper).costMap(cost) /// .supplyMap(sup).run(); /// \endcode /// /// This function can be called more than once. All the given parameters /// are kept for the next call, unless \ref resetParams() or \ref reset() /// is used, thus only the modified parameters have to be set again. /// If the underlying digraph was also modified after the construction /// of the class (or the last \ref reset() call), then the \ref reset() /// function must be called. /// /// \param pivot_rule The pivot rule that will be used during the /// algorithm. For more information, see \ref PivotRule. /// /// \return \c INFEASIBLE if no feasible flow exists, /// \n \c OPTIMAL if the problem has optimal solution /// (i.e. it is feasible and bounded), and the algorithm has found /// optimal flow and node potentials (primal and dual solutions), /// \n \c UNBOUNDED if the objective function of the problem is /// unbounded, i.e. there is a directed cycle having negative total /// cost and infinite upper bound. /// /// \see ProblemType, PivotRule /// \see resetParams(), reset() ProblemType run() { if (!init()) return INFEASIBLE; return start(); } /// \brief Reset all the parameters that have been given before. /// /// This function resets all the paramaters that have been given /// before using functions \ref lowerMap(), \ref upperMap(), /// \ref costMap(), \ref supplyMap(), \ref stSupply(), \ref supplyType(). 
/// /// It is useful for multiple \ref run() calls. Basically, all the given /// parameters are kept for the next \ref run() call, unless /// \ref resetParams() or \ref reset() is used. /// If the underlying digraph was also modified after the construction /// of the class or the last \ref reset() call, then the \ref reset() /// function must be used, otherwise \ref resetParams() is sufficient. /// /// For example, /// \code /// NetworkSimplexSimple<ListDigraph> ns(graph); /// /// // First run /// ns.lowerMap(lower).upperMap(upper).costMap(cost) /// .supplyMap(sup).run(); /// /// // Run again with modified cost map (resetParams() is not called, /// // so only the cost map have to be set again) /// cost[e] += 100; /// ns.costMap(cost).run(); /// /// // Run again from scratch using resetParams() /// // (the lower bounds will be set to zero on all arcs) /// ns.resetParams(); /// ns.upperMap(capacity).costMap(cost) /// .supplyMap(sup).run(); /// \endcode /// /// \return <tt>(*this)</tt> /// /// \see reset(), run() NetworkSimplexSimple& resetParams() { for (int i = 0; i != _node_num; ++i) { _supply[i] = 0; } for (ArcsType i = 0; i != _arc_num; ++i) { _cost[i] = 1; } _stype = GEQ; return *this; } /// \brief Reset the internal data structures and all the parameters /// that have been given before. /// /// This function resets the internal data structures and all the /// paramaters that have been given before using functions \ref lowerMap(), /// \ref upperMap(), \ref costMap(), \ref supplyMap(), \ref stSupply(), /// \ref supplyType(). /// /// It is useful for multiple \ref run() calls. Basically, all the given /// parameters are kept for the next \ref run() call, unless /// \ref resetParams() or \ref reset() is used. /// If the underlying digraph was also modified after the construction /// of the class or the last \ref reset() call, then the \ref reset() /// function must be used, otherwise \ref resetParams() is sufficient. /// /// See \ref resetParams() for examples. 
/// /// \return <tt>(*this)</tt> /// /// \see resetParams(), run() NetworkSimplexSimple& reset() { // Resize vectors _node_num = _init_nb_nodes; _arc_num = _init_nb_arcs; int all_node_num = _node_num + 1; ArcsType max_arc_num = _arc_num + 2 * _node_num; _source.resize(max_arc_num); _target.resize(max_arc_num); _cost.resize(max_arc_num); _supply.resize(all_node_num); _flow.resize(max_arc_num); _pi.resize(all_node_num); _parent.resize(all_node_num); _pred.resize(all_node_num); _forward.resize(all_node_num); _thread.resize(all_node_num); _rev_thread.resize(all_node_num); _succ_num.resize(all_node_num); _last_succ.resize(all_node_num); _state.resize(max_arc_num); //_arc_mixing=false; if (_arc_mixing && _node_num > 1) { // Store the arcs in a mixed order //ArcsType k = std::max(ArcsType(std::sqrt(double(_arc_num))), ArcsType(10)); const ArcsType k = std::max(ArcsType(_arc_num / _node_num), ArcsType(3)); mixingCoeff = k; subsequence_length = _arc_num / mixingCoeff + 1; num_big_subsequences = _arc_num % mixingCoeff; num_total_big_subsequence_numbers = subsequence_length * num_big_subsequences; #pragma omp parallel for schedule(static) for (Arc a = 0; a <= _graph.maxArcId(); a++) { // --a <=> _graph.next(a) , -1 == INVALID ArcsType i = sequence(_graph.maxArcId()-a); _source[i] = _node_id(_graph.source(a)); _target[i] = _node_id(_graph.target(a)); } } else { // Store the arcs in the original order ArcsType i = 0; Arc a; _graph.first(a); for (; a != INVALID; _graph.next(a), ++i) { _source[i] = _node_id(_graph.source(a)); _target[i] = _node_id(_graph.target(a)); //_arc_id[a] = i; } } // Reset parameters resetParams(); return *this; } /// @} /// \name Query Functions /// The results of the algorithm can be obtained using these /// functions.\n /// The \ref run() function must be called before using them. /// @{ /// \brief Return the total cost of the found flow. /// /// This function returns the total cost of the found flow. /// Its complexity is O(e). 
/// /// \note The return type of the function can be specified as a /// template parameter. For example, /// \code /// ns.totalCost<double>(); /// \endcode /// It is useful if the total cost cannot be stored in the \c Cost /// type of the algorithm, which is the default return type of the /// function. /// /// \pre \ref run() must be called before using this function. /*template <typename Number> Number totalCost() const { Number c = 0; for (ArcIt a(_graph); a != INVALID; ++a) { int i = getArcID(a); c += Number(_flow[i]) * Number(_cost[i]); } return c; }*/ template <typename Number> Number totalCost() const { Number c = 0; #ifdef SPARSE_FLOW #ifdef HASHMAP typename std::unordered_map<size_t, Value>::const_iterator it; #else typename std::map<size_t, Value>::const_iterator it; #endif for (it = _flow.data.begin(); it!=_flow.data.end(); ++it) c += Number(it->second) * Number(_cost[it->first]); return c; #else for (ArcsType i = 0; i<_flow.size(); i++) c += _flow[i] * Number(_cost[i]); return c; #endif } #ifndef DOXYGEN Cost totalCost() const { return totalCost<Cost>(); } #endif /// \brief Return the flow on the given arc. /// /// This function returns the flow on the given arc. /// /// \pre \ref run() must be called before using this function. Value flow(const Arc& a) const { return _flow[getArcID(a)]; } /// \brief Return the flow map (the primal solution). /// /// This function copies the flow value on each arc into the given /// map. The \c Value type of the algorithm must be convertible to /// the \c Value type of the map. /// /// \pre \ref run() must be called before using this function. template <typename FlowMap> void flowMap(FlowMap &map) const { Arc a; _graph.first(a); for (; a != INVALID; _graph.next(a)) { map.set(a, _flow[getArcID(a)]); } } /// \brief Return the potential (dual value) of the given node. /// /// This function returns the potential (dual value) of the /// given node. /// /// \pre \ref run() must be called before using this function. 
Cost potential(const Node& n) const { return _pi[_node_id(n)]; } /// \brief Return the potential map (the dual solution). /// /// This function copies the potential (dual value) of each node /// into the given map. /// The \c Cost type of the algorithm must be convertible to the /// \c Value type of the map. /// /// \pre \ref run() must be called before using this function. template <typename PotentialMap> void potentialMap(PotentialMap &map) const { Node n; _graph.first(n); for (; n != INVALID; _graph.next(n)) { map.set(n, _pi[_node_id(n)]); } } /// @} private: // Initialize internal data structures bool init() { if (_node_num == 0) return false; // Check the sum of supply values _sum_supply = 0; for (int i = 0; i != _node_num; ++i) { _sum_supply += _supply[i]; } /*if (!((_stype == GEQ && _sum_supply <= 0) || (_stype == LEQ && _sum_supply >= 0))) return false;*/ // Initialize artifical cost Cost ART_COST; if (std::numeric_limits<Cost>::is_exact) { ART_COST = std::numeric_limits<Cost>::max() / 2 + 1; } else { ART_COST = 0; for (ArcsType i = 0; i != _arc_num; ++i) { if (_cost[i] > ART_COST) ART_COST = _cost[i]; } ART_COST = (ART_COST + 1) * _node_num; } // Initialize arc maps for (ArcsType i = 0; i != _arc_num; ++i) { #ifndef SPARSE_FLOW _flow[i] = 0; //by default, the sparse matrix is empty #endif _state[i] = STATE_LOWER; } #ifdef SPARSE_FLOW _flow = SparseValueVector<Value>(); #endif // Set data for the artificial root node _root = _node_num; _parent[_root] = -1; _pred[_root] = -1; _thread[_root] = 0; _rev_thread[0] = _root; _succ_num[_root] = _node_num + 1; _last_succ[_root] = _root - 1; _supply[_root] = -_sum_supply; _pi[_root] = 0; // Add artificial arcs and initialize the spanning tree data structure if (_sum_supply == 0) { // EQ supply constraints _search_arc_num = _arc_num; _all_arc_num = _arc_num + _node_num; for (ArcsType u = 0, e = _arc_num; u != _node_num; ++u, ++e) { _parent[u] = _root; _pred[u] = e; _thread[u] = u + 1; _rev_thread[u + 1] = u; 
_succ_num[u] = 1; _last_succ[u] = u; _state[e] = STATE_TREE; if (_supply[u] >= 0) { _forward[u] = true; _pi[u] = 0; _source[e] = u; _target[e] = _root; _flow[e] = _supply[u]; _cost[e] = 0; } else { _forward[u] = false; _pi[u] = ART_COST; _source[e] = _root; _target[e] = u; _flow[e] = -_supply[u]; _cost[e] = ART_COST; } } } else if (_sum_supply > 0) { // LEQ supply constraints _search_arc_num = _arc_num + _node_num; ArcsType f = _arc_num + _node_num; for (ArcsType u = 0, e = _arc_num; u != _node_num; ++u, ++e) { _parent[u] = _root; _thread[u] = u + 1; _rev_thread[u + 1] = u; _succ_num[u] = 1; _last_succ[u] = u; if (_supply[u] >= 0) { _forward[u] = true; _pi[u] = 0; _pred[u] = e; _source[e] = u; _target[e] = _root; _flow[e] = _supply[u]; _cost[e] = 0; _state[e] = STATE_TREE; } else { _forward[u] = false; _pi[u] = ART_COST; _pred[u] = f; _source[f] = _root; _target[f] = u; _flow[f] = -_supply[u]; _cost[f] = ART_COST; _state[f] = STATE_TREE; _source[e] = u; _target[e] = _root; //_flow[e] = 0; //by default, the sparse matrix is empty _cost[e] = 0; _state[e] = STATE_LOWER; ++f; } } _all_arc_num = f; } else { // GEQ supply constraints _search_arc_num = _arc_num + _node_num; ArcsType f = _arc_num + _node_num; for (ArcsType u = 0, e = _arc_num; u != _node_num; ++u, ++e) { _parent[u] = _root; _thread[u] = u + 1; _rev_thread[u + 1] = u; _succ_num[u] = 1; _last_succ[u] = u; if (_supply[u] <= 0) { _forward[u] = false; _pi[u] = 0; _pred[u] = e; _source[e] = _root; _target[e] = u; _flow[e] = -_supply[u]; _cost[e] = 0; _state[e] = STATE_TREE; } else { _forward[u] = true; _pi[u] = -ART_COST; _pred[u] = f; _source[f] = u; _target[f] = _root; _flow[f] = _supply[u]; _state[f] = STATE_TREE; _cost[f] = ART_COST; _source[e] = _root; _target[e] = u; //_flow[e] = 0; //by default, the sparse matrix is empty _cost[e] = 0; _state[e] = STATE_LOWER; ++f; } } _all_arc_num = f; } return true; } // Find the join node void findJoinNode() { int u = _source[in_arc]; int v = _target[in_arc]; while (u 
!= v) { if (_succ_num[u] < _succ_num[v]) { u = _parent[u]; } else { v = _parent[v]; } } join = u; } // Find the leaving arc of the cycle and returns true if the // leaving arc is not the same as the entering arc bool findLeavingArc() { // Initialize first and second nodes according to the direction // of the cycle if (_state[in_arc] == STATE_LOWER) { first = _source[in_arc]; second = _target[in_arc]; } else { first = _target[in_arc]; second = _source[in_arc]; } delta = INF; char result = 0; Value d; ArcsType e; // Search the cycle along the path form the first node to the root for (int u = first; u != join; u = _parent[u]) { e = _pred[u]; d = _forward[u] ? _flow[e] : INF; if (d < delta) { delta = d; u_out = u; result = 1; } } // Search the cycle along the path form the second node to the root for (int u = second; u != join; u = _parent[u]) { e = _pred[u]; d = _forward[u] ? INF : _flow[e]; if (d <= delta) { delta = d; u_out = u; result = 2; } } if (result == 1) { u_in = first; v_in = second; } else { u_in = second; v_in = first; } return result != 0; } // Change _flow and _state vectors void changeFlow(bool change) { // Augment along the cycle if (delta > 0) { Value val = _state[in_arc] * delta; _flow[in_arc] += val; for (int u = _source[in_arc]; u != join; u = _parent[u]) { _flow[_pred[u]] += _forward[u] ? -val : val; } for (int u = _target[in_arc]; u != join; u = _parent[u]) { _flow[_pred[u]] += _forward[u] ? val : -val; } } // Update the state of the entering and leaving arcs if (change) { _state[in_arc] = STATE_TREE; _state[_pred[u_out]] = (_flow[_pred[u_out]] == 0) ? 
STATE_LOWER : STATE_UPPER; } else { _state[in_arc] = -_state[in_arc]; } } // Update the tree structure void updateTreeStructure() { int old_rev_thread = _rev_thread[u_out]; int old_succ_num = _succ_num[u_out]; int old_last_succ = _last_succ[u_out]; v_out = _parent[u_out]; // Check if u_in and u_out coincide if (u_in == u_out) { // Update _parent, _pred, _pred_dir _parent[u_in] = v_in; _pred[u_in] = in_arc; _forward[u_in] = (u_in == _source[in_arc]); // Update _thread and _rev_thread if (_thread[v_in] != u_out) { ArcsType after = _thread[old_last_succ]; _thread[old_rev_thread] = after; _rev_thread[after] = old_rev_thread; after = _thread[v_in]; _thread[v_in] = u_out; _rev_thread[u_out] = v_in; _thread[old_last_succ] = after; _rev_thread[after] = old_last_succ; } } else { // Handle the case when old_rev_thread equals to v_in // (it also means that join and v_out coincide) int thread_continue = old_rev_thread == v_in ? _thread[old_last_succ] : _thread[v_in]; // Update _thread and _parent along the stem nodes (i.e. the nodes // between u_in and u_out, whose parent have to be changed) int stem = u_in; // the current stem node int par_stem = v_in; // the new parent of stem int next_stem; // the next stem node int last = _last_succ[u_in]; // the last successor of stem int before, after = _thread[last]; _thread[v_in] = u_in; _dirty_revs.clear(); _dirty_revs.push_back(v_in); while (stem != u_out) { // Insert the next stem node into the thread list next_stem = _parent[stem]; _thread[last] = next_stem; _dirty_revs.push_back(last); // Remove the subtree of stem from the thread list before = _rev_thread[stem]; _thread[before] = after; _rev_thread[after] = before; // Change the parent node and shift stem nodes _parent[stem] = par_stem; par_stem = stem; stem = next_stem; // Update last and after last = _last_succ[stem] == _last_succ[par_stem] ? 
_rev_thread[par_stem] : _last_succ[stem]; after = _thread[last]; } _parent[u_out] = par_stem; _thread[last] = thread_continue; _rev_thread[thread_continue] = last; _last_succ[u_out] = last; // Remove the subtree of u_out from the thread list except for // the case when old_rev_thread equals to v_in if (old_rev_thread != v_in) { _thread[old_rev_thread] = after; _rev_thread[after] = old_rev_thread; } // Update _rev_thread using the new _thread values for (int i = 0; i != int(_dirty_revs.size()); ++i) { int u = _dirty_revs[i]; _rev_thread[_thread[u]] = u; } // Update _pred, _pred_dir, _last_succ and _succ_num for the // stem nodes from u_out to u_in int tmp_sc = 0, tmp_ls = _last_succ[u_out]; for (int u = u_out, p = _parent[u]; u != u_in; u = p, p = _parent[u]) { _pred[u] = _pred[p]; _forward[u] = !_forward[p]; tmp_sc += _succ_num[u] - _succ_num[p]; _succ_num[u] = tmp_sc; _last_succ[p] = tmp_ls; } _pred[u_in] = in_arc; _forward[u_in] = (u_in == _source[in_arc]); _succ_num[u_in] = old_succ_num; } // Update _last_succ from v_in towards the root int up_limit_out = _last_succ[join] == v_in ? 
join : -1; int last_succ_out = _last_succ[u_out]; for (int u = v_in; u != -1 && _last_succ[u] == v_in; u = _parent[u]) { _last_succ[u] = last_succ_out; } // Update _last_succ from v_out towards the root if (join != old_rev_thread && v_in != old_rev_thread) { for (int u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ; u = _parent[u]) { _last_succ[u] = old_rev_thread; } } else if (last_succ_out != old_last_succ) { for (int u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ; u = _parent[u]) { _last_succ[u] = last_succ_out; } } // Update _succ_num from v_in to join for (int u = v_in; u != join; u = _parent[u]) { _succ_num[u] += old_succ_num; } // Update _succ_num from v_out to join for (int u = v_out; u != join; u = _parent[u]) { _succ_num[u] -= old_succ_num; } } void updatePotential() { Cost sigma = _pi[v_in] - _pi[u_in] - ((_forward[u_in])?_cost[in_arc]:(-_cost[in_arc])); int end = _thread[_last_succ[u_in]]; for (int u = u_in; u != end; u = _thread[u]) { _pi[u] += sigma; } } // Heuristic initial pivots bool initialPivots() { Value curr, total = 0; std::vector<Node> supply_nodes, demand_nodes; Node u; _graph.first(u); for (; u != INVALIDNODE; _graph.next(u)) { curr = _supply[_node_id(u)]; if (curr > 0) { total += curr; supply_nodes.push_back(u); } else if (curr < 0) { demand_nodes.push_back(u); } } if (_sum_supply > 0) total -= _sum_supply; if (total <= 0) return true; ArcVector arc_vector; if (_sum_supply >= 0) { if (supply_nodes.size() == 1 && demand_nodes.size() == 1) { // Perform a reverse graph search from the sink to the source //typename GR::template NodeMap<bool> reached(_graph, false); BoolVector reached(_node_num, false); Node s = supply_nodes[0], t = demand_nodes[0]; std::vector<Node> stack; reached[t] = true; stack.push_back(t); while (!stack.empty()) { Node u, v = stack.back(); stack.pop_back(); if (v == s) break; Arc a; _graph.firstIn(a, v); for (; a != INVALID; _graph.nextIn(a)) { if (reached[u = _graph.source(a)]) continue; 
ArcsType j = getArcID(a); arc_vector.push_back(j); reached[u] = true; stack.push_back(u); } } } else { arc_vector.resize(demand_nodes.size()); // Find the min. cost incomming arc for each demand node #pragma omp parallel for for (ArcsType i = 0; i < ArcsType(demand_nodes.size()); ++i) { Node v = demand_nodes[i]; Cost min_cost = std::numeric_limits<Cost>::max(); Arc min_arc = INVALID; Arc a; _graph.firstIn(a, v); for (; a != INVALID; _graph.nextIn(a)) { Cost c = _cost[getArcID(a)]; if (c < min_cost) { min_cost = c; min_arc = a; } } arc_vector[i] = getArcID(min_arc); } arc_vector.erase(std::remove(arc_vector.begin(), arc_vector.end(), INVALID), arc_vector.end()); } } else { arc_vector.resize(supply_nodes.size()); // Find the min. cost outgoing arc for each supply node #pragma omp parallel for for (int i = 0; i < int(supply_nodes.size()); ++i) { Node u = supply_nodes[i]; Cost min_cost = std::numeric_limits<Cost>::max(); Arc min_arc = INVALID; Arc a; _graph.firstOut(a, u); for (; a != INVALID; _graph.nextOut(a)) { Cost c = _cost[getArcID(a)]; if (c < min_cost) { min_cost = c; min_arc = a; } } arc_vector[i] = getArcID(min_arc); } arc_vector.erase(std::remove(arc_vector.begin(), arc_vector.end(), INVALID), arc_vector.end()); } // Perform heuristic initial pivots for (ArcsType i = 0; i != ArcsType(arc_vector.size()); ++i) { in_arc = arc_vector[i]; if (_state[in_arc] * (_cost[in_arc] + _pi[_source[in_arc]] - _pi[_target[in_arc]]) >= 0) continue; findJoinNode(); bool change = findLeavingArc(); if (delta >= MAX) return false; changeFlow(change); if (change) { updateTreeStructure(); updatePotential(); } } return true; } // Execute the algorithm ProblemType start() { return start<BlockSearchPivotRule>(); } template <typename PivotRuleImpl> ProblemType start() { PivotRuleImpl pivot(*this); // Perform heuristic initial pivots if (!initialPivots()) return UNBOUNDED; size_t iter_number = 0; // Execute the Network Simplex algorithm while (pivot.findEnteringArc()) { if ((iter_number 
<= max_iter&&max_iter > 0) || max_iter<=0) { iter_number++; findJoinNode(); bool change = findLeavingArc(); if (delta >= MAX) return UNBOUNDED; changeFlow(change); if (change) { updateTreeStructure(); updatePotential(); } } else break; } // Check feasibility for (ArcsType e = _search_arc_num; e != _all_arc_num; ++e) { if (_flow[e] != 0) return INFEASIBLE; } // Shift potentials to meet the requirements of the GEQ/LEQ type // optimality conditions if (_sum_supply == 0) { if (_stype == GEQ) { Cost max_pot = -std::numeric_limits<Cost>::max(); for (ArcsType i = 0; i != _node_num; ++i) { if (_pi[i] > max_pot) max_pot = _pi[i]; } if (max_pot > 0) { for (ArcsType i = 0; i != _node_num; ++i) _pi[i] -= max_pot; } } else { Cost min_pot = std::numeric_limits<Cost>::max(); for (ArcsType i = 0; i != _node_num; ++i) { if (_pi[i] < min_pot) min_pot = _pi[i]; } if (min_pot < 0) { for (ArcsType i = 0; i != _node_num; ++i) _pi[i] -= min_pot; } } } return OPTIMAL; } }; //class NetworkSimplexSimple ///@} } //namespace lemon #endif //LEMON_NETWORK_SIMPLEX_H
GB_unaryop__lnot_int16_int32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__lnot_int16_int32
// op(A') function: GB_tran__lnot_int16_int32

// C type:   int16_t
// A type:   int32_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = !(aij != 0)

// type of the A matrix entries
#define GB_ATYPE \
    int32_t

// type of the C matrix entries
#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

// access the pC-th entry of the C output array
#define GB_CX(p) Cx [p]

// unary operator: logical NOT of the (already type-cast) value
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting: convert the int32_t input to the int16_t operand type
#define GB_CASTING(z, x) \
    int16_t z = (int16_t) x ;

// cij = op (cast (aij)) -- one fused load/cast/apply/store step, expanded
// both by the loop below and by the included transpose template
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT16 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply cij = !(aij != 0) elementwise over the anz entries of Ax, writing the
// int16_t results into Cx.  Returns GrB_NO_VALUE when this operator has been
// compiled out via GB_DISABLE, GrB_SUCCESS otherwise.
GrB_Info GB_unop__lnot_int16_int32
(
    int16_t *restrict Cx,       // output array, anz entries
    const int32_t *restrict Ax, // input array, anz entries
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // each iteration is independent: a static schedule gives an even,
    // contiguous partition of the entries across the threads
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in the shared template
// "GB_unaryop_transpose.c", which expands the GB_* macros defined above.
GrB_Info GB_tran__lnot_int16_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
sparselu.ref.c
#include <sys/time.h> #include <time.h> #include <stdio.h> static unsigned long long current_time_ns() { #ifdef __MACH__ clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); unsigned long long s = 1000000000ULL * (unsigned long long)mts.tv_sec; return (unsigned long long)mts.tv_nsec + s; #else struct timespec t ={0,0}; clock_gettime(CLOCK_MONOTONIC, &t); unsigned long long s = 1000000000ULL * (unsigned long long)t.tv_sec; return (((unsigned long long)t.tv_nsec)) + s; #endif } /**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. 
*/ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /**********************************************************************************************/ #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <libgen.h> #include "bots.h" #include "sparselu.h" /*********************************************************************** * checkmat: **********************************************************************/ int checkmat (float *M, float *N) { int i, j; float r_err; for (i = 0; i < bots_arg_size_1; i++) { for (j = 0; j < bots_arg_size_1; j++) { r_err = M[i*bots_arg_size_1+j] - N[i*bots_arg_size_1+j]; if ( r_err == 0.0 ) continue; if (r_err < 0.0 ) r_err = -r_err; if ( M[i*bots_arg_size_1+j] == 0 ) { bots_message("Checking failure: A[%d][%d]=%f B[%d][%d]=%f; \n", i,j, M[i*bots_arg_size_1+j], i,j, N[i*bots_arg_size_1+j]); return FALSE; } r_err = r_err / M[i*bots_arg_size_1+j]; if(r_err > EPSILON) { bots_message("Checking failure: A[%d][%d]=%f B[%d][%d]=%f; Relative Error=%f\n", i,j, M[i*bots_arg_size_1+j], i,j, N[i*bots_arg_size_1+j], r_err); return FALSE; } } } return TRUE; } /*********************************************************************** * genmat: **********************************************************************/ void genmat (float *M[]) { int null_entry, init_val, i, j, ii, jj; float *p; int a=0,b=0; init_val = 1325; /* generating the structure */ for (ii=0; ii < bots_arg_size; ii++) { for (jj=0; jj < bots_arg_size; jj++) { /* computing null entries */ null_entry=FALSE; if ((ii<jj) && (ii%3 !=0)) null_entry = TRUE; if ((ii>jj) && (jj%3 !=0)) null_entry = TRUE; if (ii%2==1) null_entry = TRUE; if (jj%2==1) null_entry = TRUE; if (ii==jj) null_entry = FALSE; if (ii==jj-1) null_entry = FALSE; if (ii-1 == jj) null_entry = FALSE; /* 
allocating matrix */ if (null_entry == FALSE){ a++; M[ii*bots_arg_size+jj] = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float)); if ((M[ii*bots_arg_size+jj] == NULL)) { bots_message("Error: Out of memory\n"); exit(101); } /* initializing matrix */ p = M[ii*bots_arg_size+jj]; for (i = 0; i < bots_arg_size_1; i++) { for (j = 0; j < bots_arg_size_1; j++) { init_val = (3125 * init_val) % 65536; (*p) = (float)((init_val - 32768.0) / 16384.0); p++; } } } else { b++; M[ii*bots_arg_size+jj] = NULL; } } } bots_debug("allo = %d, no = %d, total = %d, factor = %f\n",a,b,a+b,(float)((float)a/(float)(a+b))); } /*********************************************************************** * print_structure: **********************************************************************/ void print_structure(char *name, float *M[]) { int ii, jj; bots_message("Structure for matrix %s @ 0x%p\n",name, M); for (ii = 0; ii < bots_arg_size; ii++) { for (jj = 0; jj < bots_arg_size; jj++) { if (M[ii*bots_arg_size+jj]!=NULL) {bots_message("x");} else bots_message(" "); } bots_message("\n"); } bots_message("\n"); } /*********************************************************************** * allocate_clean_block: **********************************************************************/ float * allocate_clean_block() { int i,j; float *p, *q; p = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float)); q=p; if (p!=NULL){ for (i = 0; i < bots_arg_size_1; i++) for (j = 0; j < bots_arg_size_1; j++){(*p)=0.0; p++;} } else { bots_message("Error: Out of memory\n"); exit (101); } return (q); } /*********************************************************************** * lu0: **********************************************************************/ void lu0(float *diag) { int i, j, k; for (k=0; k<bots_arg_size_1; k++) for (i=k+1; i<bots_arg_size_1; i++) { diag[i*bots_arg_size_1+k] = diag[i*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k]; for (j=k+1; j<bots_arg_size_1; j++) diag[i*bots_arg_size_1+j] = 
diag[i*bots_arg_size_1+j] - diag[i*bots_arg_size_1+k] * diag[k*bots_arg_size_1+j]; } } /*********************************************************************** * bdiv: **********************************************************************/ void bdiv(float *diag, float *row) { int i, j, k; for (i=0; i<bots_arg_size_1; i++) for (k=0; k<bots_arg_size_1; k++) { row[i*bots_arg_size_1+k] = row[i*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k]; for (j=k+1; j<bots_arg_size_1; j++) row[i*bots_arg_size_1+j] = row[i*bots_arg_size_1+j] - row[i*bots_arg_size_1+k]*diag[k*bots_arg_size_1+j]; } } /*********************************************************************** * bmod: **********************************************************************/ void bmod(float *row, float *col, float *inner) { int i, j, k; for (i=0; i<bots_arg_size_1; i++) for (j=0; j<bots_arg_size_1; j++) for (k=0; k<bots_arg_size_1; k++) inner[i*bots_arg_size_1+j] = inner[i*bots_arg_size_1+j] - row[i*bots_arg_size_1+k]*col[k*bots_arg_size_1+j]; } /*********************************************************************** * fwd: **********************************************************************/ void fwd(float *diag, float *col) { int i, j, k; for (j=0; j<bots_arg_size_1; j++) for (k=0; k<bots_arg_size_1; k++) for (i=k+1; i<bots_arg_size_1; i++) col[i*bots_arg_size_1+j] = col[i*bots_arg_size_1+j] - diag[i*bots_arg_size_1+k]*col[k*bots_arg_size_1+j]; } void sparselu_init (float ***pBENCH, char *pass) { *pBENCH = (float **) malloc(bots_arg_size*bots_arg_size*sizeof(float *)); genmat(*pBENCH); print_structure(pass, *pBENCH); } void sparselu_seq_call(float **BENCH) { int ii, jj, kk; for (kk=0; kk<bots_arg_size; kk++) { lu0(BENCH[kk*bots_arg_size+kk]); for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) { fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]); } for (ii=kk+1; ii<bots_arg_size; ii++) if (BENCH[ii*bots_arg_size+kk] != NULL) { bdiv (BENCH[kk*bots_arg_size+kk], 
BENCH[ii*bots_arg_size+kk]); } for (ii=kk+1; ii<bots_arg_size; ii++) if (BENCH[ii*bots_arg_size+kk] != NULL) for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) { if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block(); bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]); } } } void sparselu_par_call(float **BENCH) { int ii, jj, kk; const unsigned long long full_program_start = current_time_ns(); { bots_message("Computing SparseLU Factorization (%dx%d matrix with %dx%d blocks) ", bots_arg_size,bots_arg_size,bots_arg_size_1,bots_arg_size_1); #pragma omp parallel private(kk) { #pragma omp single for (kk=0; kk<bots_arg_size; kk++) { lu0(BENCH[kk*bots_arg_size+kk]); for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) { #pragma omp task untied firstprivate(kk, jj) shared(BENCH) { fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]); } } for (ii=kk+1; ii<bots_arg_size; ii++) if (BENCH[ii*bots_arg_size+kk] != NULL) { #pragma omp task untied firstprivate(kk, ii) shared(BENCH) { bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]); } } for (ii=kk+1; ii<bots_arg_size; ii++) if (BENCH[ii*bots_arg_size+kk] != NULL) for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) { #pragma omp task untied firstprivate(kk, jj, ii) shared(BENCH) { if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block(); bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]); } } } } bots_message(" completed!\n"); } ; const unsigned long long full_program_end = current_time_ns(); printf("full_program %llu ns\n", full_program_end - full_program_start); } void sparselu_fini (float **BENCH, char *pass) { print_structure(pass, BENCH); } int sparselu_check(float **SEQ, float **BENCH) { int ii,jj,ok=1; for (ii=0; ((ii<bots_arg_size) && ok); ii++) { for (jj=0; ((jj<bots_arg_size) && ok); 
jj++) { if ((SEQ[ii*bots_arg_size+jj] == NULL) && (BENCH[ii*bots_arg_size+jj] != NULL)) ok = FALSE; if ((SEQ[ii*bots_arg_size+jj] != NULL) && (BENCH[ii*bots_arg_size+jj] == NULL)) ok = FALSE; if ((SEQ[ii*bots_arg_size+jj] != NULL) && (BENCH[ii*bots_arg_size+jj] != NULL)) ok = checkmat(SEQ[ii*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]); } } if (ok) return BOTS_RESULT_SUCCESSFUL; else return BOTS_RESULT_UNSUCCESSFUL; }
GB_binop__isle_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_08__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_02__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_04__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_int32) // A*D function (colscale): GB (_AxD__isle_int32) // D*A function (rowscale): GB (_DxB__isle_int32) // C+=B function (dense accum): GB (_Cdense_accumB__isle_int32) // C+=b function (dense accum): GB (_Cdense_accumb__isle_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_int32) // C=scalar+B GB (_bind1st__isle_int32) // C=scalar+B' GB (_bind1st_tran__isle_int32) // C=A+scalar GB (_bind2nd__isle_int32) // C=A'+scalar GB (_bind2nd_tran__isle_int32) // C type: int32_t // A type: int32_t // A pattern? 0 // B type: int32_t // B pattern? 
0 // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_INT32 || GxB_NO_ISLE_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isle_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isle_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isle_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isle_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t 
*restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isle_int32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isle_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int32_t alpha_scalar ; int32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int32_t *) alpha_scalar_in)) ; beta_scalar = (*((int32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isle_int32) ( GrB_Matrix C, const int C_sparsity, const 
int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isle_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isle_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isle_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isle_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isle_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__isle_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__isle_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
c_jacobi01.c
/* *********************************************************************** This program is part of the OpenMP Source Code Repository http://www.pcg.ull.es/ompscr/ e-mail: ompscr@etsii.ull.es Copyright (c) 2004, OmpSCR Group All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the University of La Laguna nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FILE: c_jacobi01.c VERSION: 1.1 DATE: Oct 2004 AUTHORS: Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998 Modified: Sanjiv Shah, Kuck and Associates, Inc. 
(KAI), 1998 This version: Dieter an Mey, Aachen University (RWTH), 1999 - 2003 anmey@rz.rwth-aachen.de http://www.rwth-aachen.de/People/D.an.Mey.html COMMENTS TO: ompscr@etsii.ull.es DESCRIPTION: program to solve a finite difference discretization of Helmholtz equation : (d2/dx2)u + (d2/dy2)u - alpha u = f using Jacobi iterative method. COMMENTS: OpenMP version 1: two parallel regions with one parallel loop each, the naive approach. Directives are used in this code to achieve paralleism. All do loops are parallized with default 'static' scheduling. REFERENCES: http://www.rz.rwth-aachen.de/computing/hpc/prog/par/openmp/jacobi.html BASIC PRAGMAS: parallel for USAGE: ./c_jacobi01.par 5000 5000 0.8 1.0 1000 INPUT: n - grid dimension in x direction m - grid dimension in y direction alpha - Helmholtz constant (always greater than 0.0) tol - error tolerance for iterative solver relax - Successice over relaxation parameter mits - Maximum iterations for iterative solver OUTPUT: Residual and error u(n,m) - Dependent variable (solutions) f(n,m) - Right hand side function FILE FORMATS: - RESTRICTIONS: - REVISION HISTORY: **************************************************************************/ #include <omp.h> #include <stdio.h> #include <math.h> #include <stdlib.h> //#include "OmpSCR.h" #define U(i,j) u[(i)*n+(j)] #define F(i,j) f[(i)*n+(j)] #define NUM_ARGS 6 #define NUM_TIMERS 1 #define NIN 4 #define MIN 4 #define ALPHA 0.1 #define TOL 0.1 #define RELAX 2 #define MITS 2 int n, m, mits; double tol, relax, alpha; void jacobi (int n, int m, double dx, double dy, double alpha, double omega, double *u, double *f, double tol, int maxit ); /****************************************************** * Initializes data * Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2) * ******************************************************/ void initialize( int n, int m, double alpha, double *dx, double *dy, double *u, double *f) { int i,j,xx,yy; *dx = 2.0 / (n-1); *dy = 2.0 / (m-1); /* Initilize 
initial condition and RHS */ for (j=0; j<m; j++){ for (i=0; i<n; i++){ xx = -1.0 + *dx * (i-1); yy = -1.0 + *dy * (j-1); U(j,i) = 0.0; F(j,i) = -alpha * (1.0 - xx*xx) * (1.0 - yy*yy) - 2.0 * (1.0 - xx*xx) - 2.0 * (1.0 - yy*yy); } } } /************************************************************ * Checks error between numerical and exact solution * ************************************************************/ void error_check( int n, int m, double alpha, double dx, double dy, double *u, double *f) { int i,j; double xx, yy, temp, error; dx = 2.0 / (n-1); dy = 2.0 / (n-2); error = 0.0; for (j=0; j<m; j++){ for (i=0; i<n; i++){ xx = -1.0 + dx * (i-1); yy = -1.0 + dy * (j-1); temp = U(j,i) - (1.0 - xx*xx) * (1.0 - yy*yy); error += temp*temp; } } error = sqrt(error)/(n*m); printf("Solution Error : %g\n", error); } int main(int argc, char **argv){ double *u, *f, dx, dy; double dt, mflops; int NUMTHREADS; // char *PARAM_NAMES[NUM_ARGS] = {"Grid dimension: X dir =", "Grid dimension: Y dir =", "Helmhotlz constant =", // "Successive over-relaxation parameter =", // "error tolerance for iterative solver =", "Maximum iterations for solver ="}; // char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time"}; // char *DEFAULT_VALUES[NUM_ARGS] = {"5000", "5000", "0.8", "1.0", "1e-7", "1000"}; NUMTHREADS = 1; //omp_get_num_threads(); //OSCR_init (NUMTHREADS, "Jacobi Solver v1", "Use 'jacobi01' <n> <m> <alpha> <relax> <tol> <mits>", NUM_ARGS, // PARAM_NAMES, DEFAULT_VALUES , NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES, // argc, argv); n = NIN; // OSCR_getarg_int(1); m = MIN; // OSCR_getarg_int(2); alpha = ALPHA; // OSCR_getarg_double(3); relax = RELAX; // OSCR_getarg_double(4); tol = TOL; // OSCR_getarg_double(5); mits = MITS; // OSCR_getarg_int(6); printf("-> %d, %d, %g, %g, %g, %d\n", n, m, alpha, relax, tol, mits); u = (double *) malloc(n*m*sizeof(double)); f = (double *) malloc(n*m*sizeof(double)); /* arrays are allocated and initialzed */ initialize(n, m, alpha, &dx, &dy, u, f); /* Solve Helmholtz 
eqiation */ //OSCR_timer_start(0); jacobi(n, m, dx, dy, alpha, relax, u,f, tol, mits); //OSCR_timer_stop(0); dt = 1; //OSCR_timer_read(0); printf(" elapsed time : %12.6f\n", dt); mflops = (0.000001*mits*(m-2)*(n-2)*13) / dt; printf(" MFlops : %12.6g (%d, %d, %d, %g)\n",mflops, mits, m, n, dt); error_check(n, m, alpha, dx, dy, u, f); //OSCR_report(1, TIMERS_NAMES); return 0; } /* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit) ****************************************************************** * Subroutine HelmholtzJ * Solves poisson equation on rectangular grid assuming : * (1) Uniform discretization in each direction, and * (2) Dirichlect boundary conditions * * Jacobi method is used in this routine * * Input : n,m Number of grid points in the X/Y directions * dx,dy Grid spacing in the X/Y directions * alpha Helmholtz eqn. coefficient * omega Relaxation factor * f(n,m) Right hand side function * u(n,m) Dependent variable/Solution * tol Tolerance for iterative solver * maxit Maximum number of iterations * * Output : u(n,m) - Solution ***************************************************************** */ void jacobi ( const int n, const int m, double dx, double dy, double alpha, double omega, double *u, double *f, double tol, int maxit ) { int i,j,k; double error, resid, ax, ay, b; double *uold; /* wegen Array-Kompatibilitaet, werden die Zeilen und Spalten (im Kopf) getauscht, zB uold[spalten_num][zeilen_num]; bzw. 
wir tuen so, als ob wir das gespiegelte Problem loesen wollen */ uold = (double *)malloc(sizeof(double) * n *m); ax = 1.0/(dx * dx); /* X-direction coef */ ay = 1.0/(dy*dy); /* Y_direction coef */ b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */ error = 10.0 * tol; k = 1; while (k <= maxit && error > tol) { error = 0.0; /* copy new solution into old */ #pragma omp parallel for private(i) for (j=0; j<m; j++) for (i=0; i<n; i++) uold[i + m*j] = u[i + m*j]; /* compute stencil, residual and update */ #pragma omp parallel for reduction(+:error) private(i,resid) for (j=1; j<m-1; j++) for (i=1; i<n-1; i++){ resid =( ax * (uold[i-1 + m*j] + uold[i+1 + m*j]) + ay * (uold[i + m*(j-1)] + uold[i + m*(j+1)]) + b * uold[i + m*j] - f[i + m*j] ) / b; /* update solution */ u[i + m*j] = uold[i + m*j] - omega * resid; /* accumulate residual error */ error =error + resid*resid; } /* error check */ k++; error = sqrt(error) /(n*m); } /* while */ printf("Total Number of Iterations %d\n", k); printf("Residual %.15f\n\n", error); free(uold); }
zunmlq.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_unmlq
 *
 *  Overwrites the general complex m-by-n matrix C with
 *
 *                                 side = PlasmaLeft      side = PlasmaRight
 *  trans = PlasmaNoTrans              Q * C                  C * Q
 *  trans = Plasma_ConjTrans           Q^H * C                C * Q^H
 *
 *  where Q is an orthogonal (or unitary) matrix defined as the product of k
 *  elementary reflectors
 *
 *    Q = H(1) H(2) . . . H(k)
 *
 *  as returned by plasma_zgelqf. Q is of order m if side = PlasmaLeft
 *  and of order n if side = PlasmaRight.
 *
 *******************************************************************************
 *
 * @param[in] side
 *          Intended usage:
 *          - PlasmaLeft:  apply Q or Q^H from the left;
 *          - PlasmaRight: apply Q or Q^H from the right.
 *
 * @param[in] trans
 *          Intended usage:
 *          - PlasmaNoTrans:    apply Q;
 *          - Plasma_ConjTrans: apply Q^H.
 *
 * @param[in] m
 *          The number of rows of the matrix C. m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrix C. n >= 0.
 *
 * @param[in] k
 *          The number of rows of elementary tile reflectors whose product
 *          defines the matrix Q.
 *          If side == PlasmaLeft,  m >= k >= 0.
 *          If side == PlasmaRight, n >= k >= 0.
 *
 * @param[in] pA
 *          Details of the LQ factorization of the original matrix A as
 *          returned by plasma_zgelqf.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,k).
 *
 * @param[in] T
 *          Auxiliary factorization data, computed by plasma_zgelqf.
 *
 * @param[in,out] pC
 *          On entry, pointer to the m-by-n matrix C.
 *          On exit, C is overwritten by Q*C, Q^H*C, C*Q, or C*Q^H.
 *
 * @param[in] ldc
 *          The leading dimension of the array C. ldc >= max(1,m).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 *******************************************************************************
 *
 * @sa plasma_omp_zunmlq
 * @sa plasma_cunmlq
 * @sa plasma_dormlq
 * @sa plasma_sormlq
 * @sa plasma_zgelqf
 *
 ******************************************************************************/
int plasma_zunmlq(plasma_enum_t side, plasma_enum_t trans,
                  int m, int n, int k,
                  plasma_complex64_t *pA, int lda,
                  plasma_desc_t T,
                  plasma_complex64_t *pC, int ldc)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((side != PlasmaLeft) && (side != PlasmaRight)) {
        plasma_error("illegal value of side");
        return -1;
    }
    if ((trans != Plasma_ConjTrans) && (trans != PlasmaNoTrans)) {
        plasma_error("illegal value of trans");
        return -2;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -3;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -4;
    }
    // Order of Q: m when applied from the left, n from the right.
    int an;
    if (side == PlasmaLeft) {
        an = m;
    }
    else {
        an = n;
    }
    if ((k < 0) || (k > an)) {
        plasma_error("illegal value of k");
        return -5;
    }
    if (lda < imax(1, k)) {
        plasma_error("illegal value of lda");
        return -7;
    }
    if (ldc < imax(1, m)) {
        plasma_error("illegal value of ldc");
        return -10;
    }

    // quick return
    if (m == 0 || n == 0 || k == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_gelqf(plasma, PlasmaComplexDouble, m, n);

    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t C;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        k, an, 0, 0, k, an, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, n, &C);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = ib*nb;  // unmlq: work
    retval = plasma_workspace_create(&work, lwork, PlasmaComplexDouble);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // FIX: previously returned without releasing the tile matrices
        // created above, leaking both descriptors on this error path.
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&C);
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    // asynchronous block
#pragma omp parallel
#pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_zge2desc(pC, ldc, C, &sequence, &request);

        // Call the tile async function.
        plasma_omp_zunmlq(side, trans,
                          A, T, C,
                          work,
                          &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(C, pC, ldc, &sequence, &request);
    }
    // implicit synchronization

    plasma_workspace_destroy(&work);

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&C);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_unmlq
 *
 *  Non-blocking tile version of plasma_zunmlq().
 *  May return before the computation is finished.
 *  Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] side
 *          Intended usage:
 *          - PlasmaLeft:  apply Q or Q^H from the left;
 *          - PlasmaRight: apply Q or Q^H from the right.
 *
 * @param[in] trans
 *          Intended usage:
 *          - PlasmaNoTrans:    apply Q;
 *          - Plasma_ConjTrans: apply Q^H.
 *
 * @param[in] A
 *          Descriptor of matrix A stored in the tile layout.
 *          Details of the QR factorization of the original matrix A as
 *          returned by plasma_zgeqrf.
 *
 * @param[in] T
 *          Descriptor of matrix T.
 *          Auxiliary factorization data, computed by plasma_zgeqrf.
 *
 * @param[in,out] C
 *          Descriptor of matrix C.
 *          On entry, the m-by-n matrix C.
 *          On exit, C is overwritten by Q*C, Q^H*C, C*Q, or C*Q^H.
 *
 * @param[in] work
 *          Workspace for the auxiliary arrays needed by some coreblas
 *          kernels.  For multiplication by Q contains preallocated space
 *          for work arrays.  Allocated by the plasma_workspace_create
 *          function.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs
 *          to (for completion checks and exception handling purposes).
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values.  The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_zunmlq
 * @sa plasma_omp_cunmlq
 * @sa plasma_omp_dormlq
 * @sa plasma_omp_sormlq
 * @sa plasma_omp_zgelqf
 *
 ******************************************************************************/
void plasma_omp_zunmlq(plasma_enum_t side, plasma_enum_t trans,
                       plasma_desc_t A, plasma_desc_t T, plasma_desc_t C,
                       plasma_workspace_t work,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    // Async convention: report failures through sequence/request, not a
    // return value (see @retval note above).
    if ((side != PlasmaLeft) && (side != PlasmaRight)) {
        plasma_error("invalid value of side");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((trans != Plasma_ConjTrans) && (trans != PlasmaNoTrans)) {
        plasma_error("invalid value of trans");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(C) != PlasmaSuccess) {
        plasma_error("invalid C");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // NOTE(review): when sequence (or request) is NULL, plasma_request_fail
    // is still invoked with that NULL pointer — presumably it tolerates
    // NULL arguments; confirm against the plasma_async implementation.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (C.m == 0 || C.n == 0 || A.m == 0 || A.n == 0)
        return;

    // Call the parallel function.
    // Dispatch on the context's Householder mode: tree reduction vs. flat.
    if (plasma->householder_mode == PlasmaTreeHouseholder) {
        plasma_pzunmlq_tree(side, trans,
                            A, T, C,
                            work,
                            sequence, request);
    }
    else {
        plasma_pzunmlq(side, trans,
                       A, T, C,
                       work,
                       sequence, request);
    }
}
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 16; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
GB_unop__ceil_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__ceil_fc32_fc32
// op(A') function: GB_unop_tran__ceil_fc32_fc32

// C type:  GxB_FC32_t
// A type:  GxB_FC32_t
// cast:    GxB_FC32_t cij = aij
// unaryop: cij = GB_cceilf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: complex single-precision ceiling
#define GB_OP(z, x) \
    z = GB_cceilf (x) ;

// casting (identity cast here: A and C share the same type)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ; \
    Cx [pC] = GB_cceilf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_CEIL || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise, embarrassingly parallel apply over all anz entries.
GrB_Info GB_unop_apply__ceil_fc32_fc32
(
    GxB_FC32_t *Cx,         // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC32_t aij = Ax [p] ;
        GxB_FC32_t z = aij ;
        Cx [p] = GB_cceilf (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop lives in the included template, driven by the macros above.
GrB_Info GB_unop_tran__ceil_fc32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__bxnor_uint16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):     GB (_AaddB__bxnor_uint16)
// A.*B function (eWiseMult):   GB (_AemultB_08__bxnor_uint16)
// A.*B function (eWiseMult):   GB (_AemultB_02__bxnor_uint16)
// A.*B function (eWiseMult):   GB (_AemultB_04__bxnor_uint16)
// A.*B function (eWiseMult):   GB (_AemultB_bitmap__bxnor_uint16)
// A*D function (colscale):     GB (_AxD__bxnor_uint16)
// D*A function (rowscale):     GB (_DxB__bxnor_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__bxnor_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__bxnor_uint16)
// C+=A+B function (dense ewise3):   GB ((none))
// C=A+B function (dense ewise3):    GB (_Cdense_ewise3_noaccum__bxnor_uint16)
// C=scalar+B                   GB (_bind1st__bxnor_uint16)
// C=scalar+B'                  GB (_bind1st_tran__bxnor_uint16)
// C=A+scalar                   GB (_bind2nd__bxnor_uint16)
// C=A'+scalar                  GB (_bind2nd_tran__bxnor_uint16)

// C type:   uint16_t
// A type:   uint16_t
// B,b type: uint16_t
// BinaryOp: cij = ~((aij) ^ (bij))

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: bitwise XNOR
#define GB_BINOP(z,x,y,i,j) \
    z = ~((x) ^ (y)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BXNOR || GxB_NO_UINT16 || GxB_NO_BXNOR_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (BXNOR is none of these, so this kernel is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bxnor_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bxnor_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bxnor_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bxnor_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bxnor_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = ~((x) ^ (bij)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bxnor_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = ~((aij) ^ (y)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ~((x) ^ (aij)) ; \
}

GrB_Info GB (_bind1st_tran__bxnor_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ~((aij) ^ (y)) ; \
}

GrB_Info GB (_bind2nd_tran__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
projector.c
#include <stdio.h>
#include <stdlib.h>
#include <complex.h>
#include <math.h>
#include <omp.h>
#include <time.h>
#include "utils.h"
#include "projector.h"
#include <mkl.h>
#include <mkl_types.h>
#include "linalg.h"
#include "quadrature.h"
#include "radial.h"
#include "sbt.h"

// NOTE(review): a single-letter macro named `c` is hazardous — it silently
// rewrites any later identifier `c` in this translation unit. Presumably this
// is the constant 2m/hbar^2 in eV/A^2 units used to convert energies to k^2
// (it is only used as pow(c*encut, 0.5)) — TODO confirm and consider renaming.
#define c 0.262465831
#define PI 3.14159265358979323846
#define DENSE_GRID_SCALE 1

/*
 * Build the list of per-element PAW projector/partial-wave descriptors.
 *
 * labels is read in groups of 4 per element (fields 1..3 are num_projs,
 * proj_gridsize, wave_gridsize; field 0 is not read here). wave_grids,
 * projectors, aewaves, pswaves are flat arrays consumed sequentially via the
 * wt/pt/wgt running cursors. For each element it copies the radial grids and
 * functions, builds spline coefficients, Fourier-transforms the AE-PS
 * difference with a spherical Bessel transform, low-pass filters it at
 * k^2 = c*grid_encut, and tabulates the resulting smooth difference wave on
 * smooth_grid. Caller owns the returned array.
 */
ppot_t* get_projector_list(int num_els, int* labels, int* ls, double* wave_grids,
    double* projectors, double* aewaves, double* pswaves, double* rmaxs, double grid_encut) {

    setbuf(stdout,NULL);
    ppot_t* pps = (ppot_t*) malloc(num_els * sizeof(ppot_t));
    CHECK_ALLOCATION(pps);
    // running cursors into the flat input arrays
    int wt = 0;
    int pt = 0;
    int wgt = 0;
    int pgt = 0;
    int l_num = 0;
    for (int i = 0; i < num_els; i++) {
        pps[i].num_projs = labels[4*i+1];
        pps[i].rmax = rmaxs[i];
        pps[i].proj_gridsize = labels[4*i+2];
        pps[i].wave_gridsize = labels[4*i+3];
        pps[i].total_projs = 0;
        pps[i].wave_grid = (double*) malloc((pps[i].wave_gridsize)*sizeof(double));
        pps[i].kwave_grid = (double*) malloc((pps[i].wave_gridsize)*sizeof(double));
        CHECK_ALLOCATION(pps[i].wave_grid);
        CHECK_ALLOCATION(pps[i].kwave_grid);
        pps[i].lmax = 0;
        pps[i].pspw_overlap_matrix = NULL;
        pps[i].aepw_overlap_matrix = NULL;
        pps[i].diff_overlap_matrix = NULL;
        for (int j = 0; j < pps[i].wave_gridsize; j++) {
            pps[i].wave_grid[j] = wave_grids[wgt];
            wgt++;
        }
        // projector grid: uniform spacing from 0 to rmax (exclusive of rmax)
        pps[i].proj_grid = (double*) malloc(pps[i].proj_gridsize*sizeof(double));
        CHECK_ALLOCATION(pps[i].proj_grid);
        for (int j = 0; j < pps[i].proj_gridsize; j++) {
            pps[i].proj_grid[j] = pps[i].rmax / pps[i].proj_gridsize * j;
            pgt++;
        }
        funcset_t* funcs = (funcset_t*) malloc(pps[i].num_projs*sizeof(funcset_t));
        CHECK_ALLOCATION(funcs);
        // logarithmic densification of the wave grid (no-op when
        // DENSE_GRID_SCALE == 1, as defined above)
        double* dense_wavegrid = (double*) malloc(DENSE_GRID_SCALE * pps[i].wave_gridsize * sizeof(double));
        CHECK_ALLOCATION(dense_wavegrid);
        dense_wavegrid[0] = pps[i].wave_grid[0];
        double factor = pow(pps[i].wave_grid[1]/pps[i].wave_grid[0], 1.0/DENSE_GRID_SCALE);
        for (int p = 1; p < pps[i].wave_gridsize * DENSE_GRID_SCALE; p++) {
            dense_wavegrid[p] = dense_wavegrid[p-1] * factor;
        }
        pps[i].wave_rmax = pps[i].wave_grid[pps[i].wave_gridsize-1];
        // smooth_grid: uniform grid out to the partial-wave rmax
        // (NOTE(review): allocation below is not CHECK_ALLOCATION-checked)
        pps[i].smooth_grid = (double*) malloc(pps[i].proj_gridsize * sizeof(double));
        for (int j = 0; j < pps[i].proj_gridsize; j++) {
            pps[i].smooth_grid[j] = pps[i].wave_rmax / pps[i].proj_gridsize * j;
        }
        // NOTE(review): dense_kwavegrid is allocated but never written or
        // freed in this function — presumably vestigial; TODO confirm.
        double* dense_kwavegrid = (double*) malloc(DENSE_GRID_SCALE * pps[i].wave_gridsize * sizeof(double));
        CHECK_ALLOCATION(dense_kwavegrid);
        for (int k = 0; k < pps[i].num_projs; k++) {
            funcs[k].proj = (double*) malloc(sizeof(double)*pps[i].proj_gridsize);
            funcs[k].aewave = (double*) malloc(sizeof(double)*pps[i].wave_gridsize);
            funcs[k].pswave = (double*) malloc(sizeof(double)*pps[i].wave_gridsize);
            funcs[k].diffwave = (double*) malloc(sizeof(double)*pps[i].wave_gridsize);
            CHECK_ALLOCATION(funcs[k].proj);
            CHECK_ALLOCATION(funcs[k].aewave);
            CHECK_ALLOCATION(funcs[k].pswave);
            CHECK_ALLOCATION(funcs[k].diffwave);
            funcs[k].l = ls[l_num];
            if (funcs[k].l > pps[i].lmax)
                pps[i].lmax = funcs[k].l;
            // each channel of angular momentum l contributes 2l+1 projectors
            pps[i].total_projs += 2 * ls[l_num] + 1;
            l_num++;
            for (int j = 0; j < pps[i].wave_gridsize; j++) {
                funcs[k].aewave[j] = aewaves[wt];
                funcs[k].pswave[j] = pswaves[wt];
                funcs[k].diffwave[j] = aewaves[wt] - pswaves[wt];
                wt++;
            }
            for (int j = 0; j < pps[i].proj_gridsize; j++) {
                funcs[k].proj[j] = projectors[pt];
                pt++;
            }
            funcs[k].proj_spline = spline_coeff(pps[i].proj_grid, funcs[k].proj, pps[i].proj_gridsize);
            funcs[k].aewave_spline = spline_coeff(pps[i].wave_grid, funcs[k].aewave, pps[i].wave_gridsize);
            funcs[k].pswave_spline = spline_coeff(pps[i].wave_grid, funcs[k].pswave, pps[i].wave_gridsize);
            funcs[k].diffwave_spline = spline_coeff(pps[i].wave_grid, funcs[k].diffwave, pps[i].wave_gridsize);
        }
        // transform the AE-PS difference waves to reciprocal space; fills
        // kwave_grid as a side effect of the setup call
        sbt_descriptor_t* d = spherical_bessel_transform_setup(1e7, 0, pps[i].lmax,
            pps[i].wave_gridsize, pps[i].wave_grid, pps[i].kwave_grid);
        for (int k = 0; k < pps[i].num_projs; k++) {
            funcs[k].kwave = wave_spherical_bessel_transform(d, funcs[k].diffwave, funcs[k].l);
            //funcs[k].kwave = besselt(pps[i].wave_grid, pps[i].kwave_grid, funcs[k].diffwave, 520.0, pps[i].wave_gridsize, funcs[k].l);
            funcs[k].kwave_spline = spline_coeff(pps[i].kwave_grid, funcs[k].kwave, pps[i].wave_gridsize);
        }
        for (int k = 0; k < pps[i].num_projs; k++) {
            //double* dense_kwave = wave_spherical_bessel_transform(d2, funcs[k].smooth_diffwave, funcs[k].l);
            // low-pass filter: keep only components with k < sqrt(c*grid_encut)
            // (NOTE(review): calloc result not checked here)
            double* dense_kwave = (double*) calloc(pps[i].wave_gridsize * DENSE_GRID_SCALE, sizeof(double));
            int q = 0;
            while (pps[i].kwave_grid[q] < pow(c*grid_encut, 0.5)) {
                dense_kwave[q] = funcs[k].kwave[q];
                q++;
            }
            double* smooth_diffwave = inverse_wave_spherical_bessel_transform(d, dense_kwave, funcs[k].l);
            double** smooth_wave_spline = spline_coeff(dense_wavegrid, smooth_diffwave, DENSE_GRID_SCALE*pps[i].wave_gridsize);
            free(dense_kwave);
            double* sdw = (double*) malloc(pps[i].proj_gridsize*sizeof(double));
            // r=0 is handled specially below; for l>0 the wave vanishes there
            if (funcs[k].l > 0)
                sdw[0] = 0;
            for (int p = 1; p < pps[i].proj_gridsize; p++) {
                // smooth_grid should be like proj_grid except that rmax should
                // be rmax of the partial wave difference
                sdw[p] = wave_interpolate(pps[i].smooth_grid[p], pps[i].wave_gridsize*DENSE_GRID_SCALE,
                    dense_wavegrid, smooth_diffwave, smooth_wave_spline);
            }
            // for l=0 extrapolate the origin from the first interior point
            if (funcs[k].l == 0)
                sdw[0] = sdw[1];
            double** sdw_spline = spline_coeff(pps[i].smooth_grid, sdw, pps[i].proj_gridsize);
            funcs[k].smooth_diffwave = sdw;
            funcs[k].smooth_diffwave_spline = sdw_spline;
            free(smooth_diffwave);
            free(smooth_wave_spline[0]);
            free(smooth_wave_spline[1]);
            free(smooth_wave_spline[2]);
            free(smooth_wave_spline);
        }
        free_sbt_descriptor(d);
        pps[i].funcs = funcs;
        make_pwave_overlap_matrices(pps+i);
    }
    mkl_free_buffers();
    printf("finished making projector list\n");
    return pps;
}

/*
 * Direct (O(N^2)) radial spherical Bessel transform of f on log grid r.
 * Writes the k grid into k and returns the newly allocated transform g
 * (caller frees). Kept as a reference implementation — the fast SBT above
 * is used in practice (see the commented-out call in get_projector_list).
 */
double* besselt(double* r, double* k, double* f, double encut, int N, int l) {

    double kmax = pow(encut*c, 0.5);
    double drho = log(r[1]/r[0]);
    double kmin = kmax * exp((1-N)*drho);
    double* g = (double*) malloc(N*sizeof(double));
    CHECK_ALLOCATION(g);
    for (int i = 0; i < N; i++) {
        double dr = r[0];
        g[i] = 0;
        k[i] = kmin * exp(i*drho);
        // trapezoid-like sum: dr lags one step behind r[j]
        for (int j = 0; j < N; j++) {
            g[i] += f[j] * r[j] * sph_bessel(k[i], r[j], l) * dr;
            if (j != N-1) dr = r[j+1]-r[j];
        }
    }
    return g;
}

/*
 * Evaluate projector functions on the real-space FFT grid for every site.
 * Thin wrapper over setup_site with an identity site list (flag 0 selects
 * projector mode — see setup_site). Caller frees via free_real_proj_site_list.
 */
real_proj_site_t* projector_values(int num_sites, int* labels, double* coords,
    double* lattice, double* reclattice, ppot_t* pps, int* fftg) {

    real_proj_site_t* sites = (real_proj_site_t*) malloc(num_sites * sizeof(real_proj_site_t));
    CHECK_ALLOCATION(sites);
    // NOTE(review): this malloc is not CHECK_ALLOCATION-checked
    int* all_sites = (int*) malloc(num_sites * sizeof(int));
    for (int i = 0; i < num_sites; i++) {
        all_sites[i] = i;
    }
    setup_site(sites, pps, num_sites, all_sites, labels, coords, lattice, fftg, 0);
    free(all_sites);
    return sites;
}

/*
 * Same as projector_values but only for the sites in Nlst, with setup_site
 * flag 1 (smooth partial-wave mode — see setup_site).
 */
real_proj_site_t* smooth_pw_values(int num_N, int* Nlst, int* labels, double* coords,
    double* lattice, double* reclattice, ppot_t* pps, int* fftg) {

    real_proj_site_t* sites = (real_proj_site_t*) malloc(num_N * sizeof(real_proj_site_t));
    CHECK_ALLOCATION(sites);
    setup_site(sites, pps, num_N, Nlst, labels, coords, lattice, fftg, 1);
    return sites;
}

/*
 * Core projection routine: for each site, compute <p_i|psi> overlaps of the
 * real-space wavefunction x (on the FFT grid) with each projector, including
 * the Bloch phase e^{ik.r} along the minimal path to the site, and store
 * them (with their n/l/m quantum labels) into projections[s].
 * projections entries are allocated here; caller owns them.
 */
void onto_projector_helper(band_t* band, double complex* x, real_proj_site_t* sites,
    int num_sites, double* lattice, double* reclattice, double* kpt, int num_cart_gridpts,
    int* fftg, projection_t* projections) {

    // volume element of one FFT grid cell
    double dv = determinant(lattice) / fftg[0] / fftg[1] / fftg[2];
    double kdotr = 0;

    double kpt_cart[3] = {0,0,0};
    kpt_cart[0] = kpt[0];
    kpt_cart[1] = kpt[1];
    kpt_cart[2] = kpt[2];
    frac_to_cartesian(kpt_cart, reclattice);

    double complex overlap;
    double complex* values;
    // scratch buffer sized by the largest per-site index count
    // (NOTE(review): malloc not CHECK_ALLOCATION-checked)
    double complex* xvals = (double complex*) malloc(num_cart_gridpts * sizeof(double complex));
    int* indices;
    int num_indices, index;
    for (int s = 0; s < num_sites; s++) {
        num_indices = sites[s].num_indices;
        indices = sites[s].indices;
        projections[s].num_projs = sites[s].num_projs;
        projections[s].total_projs = sites[s].total_projs;
        projections[s].ns = malloc(sites[s].total_projs * sizeof(int));
        projections[s].ls = malloc(sites[s].total_projs * sizeof(int));
        projections[s].ms = malloc(sites[s].total_projs * sizeof(int));
        projections[s].overlaps = (double complex*) malloc(sites[s].total_projs * sizeof(double complex));
        CHECK_ALLOCATION(projections[s].ns);
        CHECK_ALLOCATION(projections[s].ls);
        CHECK_ALLOCATION(projections[s].ms);
        CHECK_ALLOCATION(projections[s].overlaps);
        // weight each grid value by dv and the Bloch phase along the path
        for (int i = 0; i < num_indices; i++) {
            index = indices[i];
            kdotr = dot(kpt_cart, sites[s].paths+i*3);
            xvals[i] = x[index] * dv * cexp(I * kdotr);
        }
        for (int p = 0; p < sites[s].total_projs; p++) {
            projections[s].ns[p] = sites[s].projs[p].func_num;
            projections[s].ls[p] = sites[s].projs[p].l;
            projections[s].ms[p] = sites[s].projs[p].m;
            values = sites[s].projs[p].values;
            // overlap = conj(values) . xvals (BLAS dot, conjugated first arg)
            cblas_zdotc_sub(num_indices, values, 1, xvals, 1, &overlap);
            projections[s].overlaps[p] = overlap;
        }
    }
    free(xvals);
}

/*
 * Inverse of the projection step: rebuild a real-space grid function x by
 * summing, over every site and projector channel, overlap * projector value
 * with the conjugate Bloch phase. projections here is indexed by the global
 * site index stored in sites[s].index (unlike onto_projector_helper).
 */
void get_aug_freqs_helper(band_t* band, double complex* x, real_proj_site_t* sites,
    int num_sites, double* lattice, double* reclattice, double* kpt, int num_cart_gridpts,
    int* fftg, projection_t* projections) {

    // NOTE(review): dv, kdotr, overlap, frac, ii/jj/kk are declared but some
    // are unused below — presumably leftovers from an earlier version.
    double dv = determinant(lattice) / fftg[0] / fftg[1] / fftg[2];
    double kdotr = 0;

    int gridsize = fftg[0] * fftg[1] * fftg[2];
    // clear the output grid before accumulating
    for (int w = 0; w < gridsize; w++) {
        x[w] = 0;
    }

    double kpt_cart[3] = {0,0,0};
    kpt_cart[0] = kpt[0];
    kpt_cart[1] = kpt[1];
    kpt_cart[2] = kpt[2];
    frac_to_cartesian(kpt_cart, reclattice);
    double complex overlap;
    double complex* values;
    int* indices;
    int i, j, k, ii, jj, kk;
    double frac[3];
    double phasecoord[3];
    double complex phase;
    int num_indices, index;
    for (int s = 0; s < num_sites; s++) {
        num_indices = sites[s].num_indices;
        indices = sites[s].indices;
        for (int ind = 0; ind < num_indices; ind++) {
            index = indices[ind];
            // decompose flat FFT index into (i,j,k)
            i = index / (fftg[1]*fftg[2]);
            j = index % (fftg[1]*fftg[2]);
            j = j / (fftg[2]);
            k = index % (fftg[2]);
            frac[0] = (double) i / fftg[0];
            frac[1] = (double) j / fftg[1];
            frac[2] = (double) k / fftg[2];
            phasecoord[0] = -sites[s].paths[3*ind+0];// + frac[0];
            phasecoord[1] = -sites[s].paths[3*ind+1];// + frac[1];
            phasecoord[2] = -sites[s].paths[3*ind+2];// + frac[2];
            phase = cexp(I*dot(phasecoord, kpt_cart));
            for (int p = 0; p < sites[s].total_projs; p++) {
                values = sites[s].projs[p].values;
                x[index] += projections[sites[s].index].overlaps[p] * values[ind] * phase;
            }
        }
    }
}

/*
 * FFT one band to the real-space grid and compute its projector overlaps,
 * storing them in kpt->bands[band_num]->projections.
 */
void onto_projector(kpoint_t* kpt, int band_num, real_proj_site_t* sites, int num_sites,
    int* G_bounds, double* lattice, double* reclattice, int num_cart_gridpts, int* fftg) {

    double* k = kpt->k;
    int* Gs = kpt->Gs;
    float complex* Cs = kpt->bands[band_num]->Cs;
    int num_waves = kpt->num_waves;

    double complex* x = (double complex*) mkl_calloc(fftg[0]*fftg[1]*fftg[2], sizeof(double complex), 64);
    CHECK_ALLOCATION(x);

    fft3d(x, G_bounds, lattice, k, Gs, Cs, num_waves, fftg);

    band_t* band = kpt->bands[band_num];
    band->projections = (projection_t*) malloc(num_sites * sizeof(projection_t));
    CHECK_ALLOCATION (band->projections);

    onto_projector_helper(kpt->bands[band_num], x, sites, num_sites,
        lattice, reclattice, k, num_cart_gridpts, fftg, band->projections);

    //kpt->bands[band_num]->CRs = x;
    mkl_free(x);
}

/*
 * Noncollinear variant of onto_projector: the coefficient array holds the
 * spin-up half first and the spin-down half second (num_waves/2 each), and
 * each spinor component gets its own projection array.
 * NOTE(review): unlike onto_projector, the two projection mallocs here are
 * not CHECK_ALLOCATION-checked, and x up/down buffers are not mkl_free'd —
 * TODO confirm whether that leak is intentional.
 */
void onto_projector_ncl(kpoint_t* kpt, int band_num, real_proj_site_t* sites, int num_sites,
    int* G_bounds, double* lattice, double* reclattice, int num_cart_gridpts, int* fftg) {

    double* k = kpt->k;
    int* Gs = kpt->Gs;
    float complex* Cs = kpt->bands[band_num]->Cs;
    int num_waves = kpt->num_waves;

    double complex* xup = (double complex*) mkl_calloc(fftg[0]*fftg[1]*fftg[2], sizeof(double complex), 64);
    double complex* xdown = (double complex*) mkl_calloc(fftg[0]*fftg[1]*fftg[2], sizeof(double complex), 64);
    CHECK_ALLOCATION(xup);
    CHECK_ALLOCATION(xdown);

    fft3d(xup, G_bounds, lattice, k, Gs, Cs, num_waves/2, fftg);
    fft3d(xdown, G_bounds, lattice, k, Gs, Cs+num_waves/2, num_waves/2, fftg);

    band_t* band = kpt->bands[band_num];
    band->up_projections = (projection_t*) malloc(num_sites * sizeof(projection_t));
    band->down_projections = (projection_t*) malloc(num_sites * sizeof(projection_t));

    onto_projector_helper(kpt->bands[band_num], xup, sites, num_sites,
        lattice, reclattice, k, num_cart_gridpts, fftg, band->up_projections);
    onto_projector_helper(kpt->bands[band_num], xdown, sites, num_sites,
        lattice, reclattice, k, num_cart_gridpts, fftg, band->down_projections);
}

/*
 * Like onto_projector but projects onto the smooth partial waves instead,
 * storing the result in band->wave_projections (sites must come from
 * smooth_pw_values).
 */
void onto_smoothpw(kpoint_t* kpt, int band_num, real_proj_site_t* sites, int num_sites,
    int* G_bounds, double* lattice, double* reclattice, int num_cart_gridpts, int* fftg) {

    double* k = kpt->k;
    int* Gs = kpt->Gs;
    float complex* Cs = kpt->bands[band_num]->Cs;
    int num_waves = kpt->num_waves;

    double complex* x = (double complex*) mkl_calloc(fftg[0]*fftg[1]*fftg[2], sizeof(double complex), 64);
    CHECK_ALLOCATION(x);

    fft3d(x, G_bounds, lattice, k, Gs, Cs, num_waves, fftg);

    band_t* band = kpt->bands[band_num];
    band->wave_projections = (projection_t*) malloc(num_sites * sizeof(projection_t));
    CHECK_ALLOCATION (band->wave_projections);

    onto_projector_helper(kpt->bands[band_num], x, sites, num_sites,
        lattice, reclattice, k, num_cart_gridpts, fftg, band->wave_projections);

    mkl_free(x);
}

/*
 * Build the augmentation-region plane-wave coefficients CAs for one band:
 * reconstruct the augmentation grid function from the stored projections
 * (get_aug_freqs_helper) and forward-FFT it back to plane waves.
 * Idempotent: returns immediately if CAs is already populated.
 */
void get_aug_freqs(kpoint_t* kpt, int band_num, real_proj_site_t* sites, int num_sites,
    int* G_bounds, double* lattice, double* reclattice, int num_cart_gridpts, int* fftg) {

    if (kpt->bands[band_num]->CAs != NULL) {
        return;
    }

    double* k = kpt->k;
    int* Gs = kpt->Gs;
    float complex* Cs = kpt->bands[band_num]->Cs;
    int num_waves = kpt->num_waves;

    double complex* x = (double complex*) mkl_calloc(fftg[0]*fftg[1]*fftg[2], sizeof(double complex), 64);
    CHECK_ALLOCATION(x);

    band_t* band = kpt->bands[band_num];

    get_aug_freqs_helper(kpt->bands[band_num], x, sites, num_sites,
        lattice, reclattice, k, num_cart_gridpts, fftg, band->projections);

    band->CAs = (float complex*) mkl_calloc(kpt->num_waves, sizeof(float complex), 64);
    fwd_fft3d(x, G_bounds, lattice, k, Gs, band->CAs, num_waves, fftg);

    //for (int w = 0; w < kpt->num_waves; w++) {
    //    band->CAs[w] += band->Cs[w];
    //    printf("%f %f %f %f\n", creal(band->CAs[w]), cimag(band->CAs[w]),
    //        creal(band->Cs[w]), cimag(band->Cs[w]));
    //}
    mkl_free(x);
}

/*
 * Compute an upper bound on the number of FFT grid points that can fall
 * inside a sphere of radius rmax around a site, for each of the three
 * lattice-plane orientations, and store the maximum in
 * pp_ptr->num_cart_gridpts (used to size scratch buffers).
 */
void add_num_cart_gridpts(ppot_t* pp_ptr, double* lattice, int* fftg) {

    ppot_t pp = *pp_ptr;

    double maga1 = mag(lattice+0);
    double maga2 = mag(lattice+3);
    double maga3 = mag(lattice+6);

    double vtemp[3];
    double vmag, sinphi123;

    // use the larger of the projector rmax and the partial-wave grid extent
    double rmax = pp.rmax;
    if (pp.wave_grid[pp.wave_gridsize-1] > rmax) {
        rmax = pp.wave_grid[pp.wave_gridsize-1];
    }

    double phi12 = acos(dot(lattice+0, lattice+3) / (maga1 * maga2));
    vcross(vtemp, lattice+0, lattice+3);
    vmag = mag(vtemp);
    sinphi123 = dot(lattice+6, vtemp) / (vmag * maga3);
    double na1maxA = rmax * fftg[0] / (maga1 * fabs(sin(phi12))) + 1;
    double na2maxA = rmax * fftg[1] / (maga2 * fabs(sin(phi12))) + 1;
    double na3maxA = rmax * fftg[2] / (maga3 * fabs(sinphi123)) + 1;
    int npmaxA = (int)(4.0/3.0*PI*na1maxA*na2maxA*na3maxA) + 1;

    double phi13 = acos(dot(lattice+0, lattice+6) / (maga1 * maga3));
    vcross(vtemp, lattice+0, lattice+6);
    vmag = mag(vtemp);
    sinphi123 = dot(lattice+3, vtemp) / (vmag * maga2);
    double na1maxB = rmax * fftg[0] / (maga1 * fabs(sin(phi13))) + 1;
    double na2maxB = rmax * fftg[1] / (maga2 * fabs(sinphi123)) + 1;
    double na3maxB = rmax * fftg[2] / (maga3 * fabs(sin(phi13))) + 1;
    int npmaxB = (int)(4.0/3.0*PI*na1maxB*na2maxB*na3maxB) + 1;

    double phi23 = acos(dot(lattice+3, lattice+6) / (maga2 * maga3));
    vcross(vtemp, lattice+3, lattice+6);
    vmag = mag(vtemp);
    sinphi123 = dot(lattice, vtemp) / (vmag * maga1);
    double na1maxC = rmax * fftg[0] / (maga1 * fabs(sinphi123)) + 1;
    double na2maxC = rmax * fftg[1] / (maga2 * fabs(sin(phi23))) + 1;
    double na3maxC = rmax * fftg[2] / (maga3 * fabs(sin(phi23))) + 1;
    int npmaxC = (int)(4.0/3.0*PI*na1maxC*na2maxC*na3maxC) + 1;

    int npmax = npmaxA;
    if (npmaxB > npmax) npmax = npmaxB;
    if (npmaxC > npmax) npmax = npmaxC;

    pp_ptr->num_cart_gridpts = npmax;
}

/*
 * Precompute the symmetric num_projs x num_projs radial overlap matrices
 * <ps_i|ps_j>, <ae_i|ae_j>, <ae_i-ps_i|ae_j-ps_j> for one element (nonzero
 * only between channels of equal l) and attach them to pp_ptr.
 * NOTE(review): psprod/aeprod/diprod and the three spline tables allocated
 * inside the loop are never freed — this leaks per call; TODO confirm.
 */
void make_pwave_overlap_matrices(ppot_t* pp_ptr) {
    ppot_t pp = *pp_ptr;
    int size = pp.num_projs * pp.num_projs;

    double* psov = (double*) calloc(size, sizeof(double));
    double* aeov = (double*) calloc(size, sizeof(double));
    double* diov = (double*) calloc(size, sizeof(double));
    CHECK_ALLOCATION(psov);
    CHECK_ALLOCATION(aeov);
    CHECK_ALLOCATION(diov);

    // fill the upper triangle by spline-integrating each radial product
    for (int i = 0; i < pp.num_projs; i++) {
        for (int j = i; j < pp.num_projs; j++) {
            if (pp.funcs[i].l == pp.funcs[j].l) {
                double* ps1 = pp.funcs[i].pswave;
                double* ps2 = pp.funcs[j].pswave;
                double* ae1 = pp.funcs[i].aewave;
                double* ae2 = pp.funcs[j].aewave;
                double* psprod = (double*) malloc(pp.wave_gridsize*sizeof(double));
                double* aeprod = (double*) malloc(pp.wave_gridsize*sizeof(double));
                double* diprod = (double*) malloc(pp.wave_gridsize*sizeof(double));
                for (int k = 0; k < pp.wave_gridsize; k++) {
                    psprod[k] = ps1[k] * ps2[k];
                    aeprod[k] = ae1[k] * ae2[k];
                    diprod[k] = (ae1[k]-ps1[k]) * (ae2[k]-ps2[k]);
                }
                double** psspline = spline_coeff(pp.wave_grid, psprod, pp.wave_gridsize);
                double** aespline = spline_coeff(pp.wave_grid, aeprod, pp.wave_gridsize);
                double** displine = spline_coeff(pp.wave_grid, diprod, pp.wave_gridsize);
                psov[i*pp.num_projs+j] = spline_integral(pp.wave_grid, psprod, psspline, pp.wave_gridsize);
                aeov[i*pp.num_projs+j] = spline_integral(pp.wave_grid, aeprod, aespline, pp.wave_gridsize);
                diov[i*pp.num_projs+j] = spline_integral(pp.wave_grid, diprod, displine, pp.wave_gridsize);
            }
        }
    }
    // mirror into the lower triangle (matrices are symmetric)
    for (int i = 1; i < pp.num_projs; i++) {
        for (int j = 0; j < i; j++) {
            psov[pp.num_projs*i+j] = psov[pp.num_projs*j+i];
            aeov[pp.num_projs*i+j] = aeov[pp.num_projs*j+i];
            diov[pp.num_projs*i+j] = diov[pp.num_projs*j+i];
        }
    }

    pp_ptr->pspw_overlap_matrix = psov;
    pp_ptr->aepw_overlap_matrix = aeov;
    pp_ptr->diff_overlap_matrix = diov;
}

/*
 * Top-level driver: attach grid metadata to wf, size scratch space via
 * add_num_cart_gridpts, evaluate projectors on the FFT grid, then compute
 * projections for every (band, kpoint) pair in parallel (plus the
 * noncollinear spinor projections when wf->is_ncl).
 */
void setup_projections(pswf_t* wf, ppot_t* pps, int num_elems,
    int num_sites, int* fftg, int* labels, double* coords) {

    wf->num_sites = num_sites;
    // NOTE(review): this malloc is not CHECK_ALLOCATION-checked, and
    // num_sites is assigned twice (harmless but redundant).
    wf->fftg = (int*) malloc(3*sizeof(int));
    wf->fftg[0] = fftg[0];
    wf->fftg[1] = fftg[1];
    wf->fftg[2] = fftg[2];
    wf->num_elems = num_elems;
    wf->num_sites = num_sites;
    wf->pps = pps;
    printf("started setup_proj\n");
    int num_cart_gridpts = 0;
    // take the max scratch size over all elements
    for (int p = 0; p < num_elems; p++) {
        add_num_cart_gridpts(pps+p, wf->lattice, fftg);
        if (pps[p].num_cart_gridpts > num_cart_gridpts) {
            num_cart_gridpts = pps[p].num_cart_gridpts;
        }
    }
    int NUM_KPTS = wf->nwk * wf->nspin;
    int NUM_BANDS = wf->nband;
    printf("calculating projector_values\n");
    real_proj_site_t* sites = projector_values(num_sites, labels, coords,
        wf->lattice, wf->reclattice, pps, fftg);
    printf("onto_projector calcs\n");
    #if defined(_OPENMP)
    omp_set_num_threads(omp_get_max_threads());
    #endif
    #pragma omp parallel for
    for (int w = 0; w < NUM_BANDS * NUM_KPTS; w++) {
        // flat loop over (band, kpoint) pairs for better load balance
        kpoint_t* kpt = wf->kpts[w % NUM_KPTS];
        int band_num = w / NUM_KPTS;
        onto_projector(kpt, band_num, sites, num_sites,
            wf->G_bounds, wf->lattice, wf->reclattice, num_cart_gridpts, fftg);
        if (wf->is_ncl) {
            onto_projector_ncl(kpt, band_num, sites, num_sites,
                wf->G_bounds, wf->lattice, wf->reclattice, num_cart_gridpts, fftg);
        }
    }
    printf("Done \n");
    free_real_proj_site_list(sites, num_sites);
}

void overlap_setup_real(pswf_t* wf_R, pswf_t* wf_S, int* labels_R, int* labels_S,
    double* coords_R, double* coords_S, int* N_R, int* N_S, int* N_RS_R, int* N_RS_S,
    int num_N_R, int num_N_S, int num_N_RS) {

    clean_wave_projections(wf_R);
    clean_wave_projections(wf_S);
    wf_R->wp_num = num_N_S;
    wf_S->wp_num = num_N_R;

    double complex** overlaps = NULL;
    if (num_N_RS > 0) {
        overlaps = (double complex**) malloc(num_N_RS * sizeof(double complex*));
        CHECK_ALLOCATION(overlaps);
    }
    printf("STARTING OVERLAP_SETUP\n");
    int NUM_KPTS = wf_R->nwk * wf_R->nspin;
    int NUM_BANDS = wf_S->nband;
    int max_num_indices = 0;

    if (num_N_R > 0) {
        real_proj_site_t* sites_N_R = smooth_pw_values(num_N_R, N_R, labels_R, coords_R,
            wf_S->lattice, wf_S->reclattice, wf_R->pps, wf_S->fftg);
        for (int s = 0; s < num_N_R; s++) {
            if (sites_N_R[s].num_indices > max_num_indices) {
                max_num_indices = sites_N_R[s].num_indices;
            }
        }
        #if defined(_OPENMP)
        omp_set_num_threads(omp_get_max_threads());
        #endif
        #pragma omp parallel for
        for (int w = 0; w < NUM_BANDS * NUM_KPTS; w++) {
            kpoint_t* kpt_S =
wf_S->kpts[w%NUM_KPTS]; onto_smoothpw(kpt_S, w/NUM_KPTS, sites_N_R, num_N_R, wf_S->G_bounds, wf_S->lattice, wf_S->reclattice, max_num_indices, wf_S->fftg); } free_real_proj_site_list(sites_N_R, num_N_R); } max_num_indices = 0; printf("PART 1 DONE\n"); if (num_N_S > 0) { real_proj_site_t* sites_N_S = smooth_pw_values(num_N_S, N_S, labels_S, coords_S, wf_R->lattice, wf_R->reclattice, wf_S->pps, wf_R->fftg); NUM_BANDS = wf_R->nband; for (int s = 0; s < num_N_S; s++) { if (sites_N_S[s].num_indices > max_num_indices) { max_num_indices = sites_N_S[s].num_indices; } } #if defined(_OPENMP) omp_set_num_threads(omp_get_max_threads()); #endif #pragma omp parallel for for (int w = 0; w < NUM_BANDS * NUM_KPTS; w++) { kpoint_t* kpt_R = wf_R->kpts[w%NUM_KPTS]; onto_smoothpw(kpt_R, w/NUM_KPTS, sites_N_S, num_N_S, wf_R->G_bounds, wf_R->lattice, wf_R->reclattice, max_num_indices, wf_R->fftg); } free_real_proj_site_list(sites_N_S, num_N_S); } printf("PART 2 DONE\n"); double* dcoords = NULL; if (num_N_RS > 0) { dcoords = (double*) malloc(3 * num_N_RS * sizeof(double)); CHECK_ALLOCATION(dcoords); } #if defined(_OPENMP) omp_set_num_threads(omp_get_max_threads()); #endif #pragma omp parallel for for (int i = 0; i < num_N_RS; i++) { double R = 0; int l1, l2; int s1 = N_RS_R[i]; int s2 = N_RS_S[i]; ppot_t pp1 = wf_R->pps[labels_R[s1]]; ppot_t pp2 = wf_S->pps[labels_S[s2]]; // CALCULATE THE DIFF COORD HERE, PASS TO offsite_wave_overlap AND SAVE IT FOR USE IN compensation_terms overlaps[i] = (double complex*) calloc(pp1.total_projs * pp2.total_projs, sizeof(double complex)); CHECK_ALLOCATION(overlaps[i]); double* coord1 = coords_R + 3 * s1; double* coord2 = coords_S + 3 * s2; min_cart_path(coord2, coord1, wf_R->lattice, dcoords + 3*i, &R); int tj = 0; for (int j = 0; j < pp1.num_projs; j++) { l1 = pp1.funcs[j].l; for (int m1 = -l1; m1 <= l1; m1++) { int tk = 0; for (int k = 0; k < pp2.num_projs; k++) { l2 = pp2.funcs[k].l; for (int m2 = -l2; m2 <= l2; m2++) { 
overlaps[i][tj*pp2.total_projs+tk] = conj( reciprocal_offsite_wave_overlap(dcoords + 3*i, pp1.kwave_grid, pp1.funcs[j].kwave, pp1.funcs[j].kwave_spline, pp1.wave_gridsize, pp2.kwave_grid, pp2.funcs[k].kwave, pp2.funcs[k].kwave_spline, pp2.wave_gridsize, wf_R->lattice, l1, m1, l2, m2) ); tk++; } } tj++; } } } wf_S->overlaps = overlaps; wf_S->dcoords = dcoords; wf_S->num_aug_overlap_sites = num_N_RS; wf_R->num_aug_overlap_sites = num_N_RS; printf("PART 3 DONE\nFINISHED OVERLAP SETUP\n"); } void overlap_setup_recip(pswf_t* wf_R, pswf_t* wf_S, int* labels_R, int* labels_S, double* coords_R, double* coords_S, int* N_R, int* N_S, int* N_RS_R, int* N_RS_S, int num_N_R, int num_N_S, int num_N_RS) { clean_wave_projections(wf_R); clean_wave_projections(wf_S); wf_R->wp_num = num_N_S; wf_S->wp_num = num_N_R; double complex** overlaps = NULL; if (num_N_RS > 0) { overlaps = (double complex**) malloc(num_N_RS * sizeof(double complex*)); CHECK_ALLOCATION(overlaps); } printf("STARTING OVERLAP_SETUP RECIP\n"); int NUM_KPTS = wf_R->nwk * wf_R->nspin; int NUM_BANDS = wf_R->nband; int max_num_indices = 0; if (num_N_R > 0) { real_proj_site_t* sites_N_R = smooth_pw_values(num_N_R, N_R, labels_R, coords_R, wf_R->lattice, wf_R->reclattice, wf_R->pps, wf_R->fftg); for (int s = 0; s < num_N_R; s++) { if (sites_N_R[s].num_indices > max_num_indices) { max_num_indices = sites_N_R[s].num_indices; } } #if defined(_OPENMP) omp_set_num_threads(omp_get_max_threads()); #endif #pragma omp parallel for for (int w = 0; w < NUM_BANDS * NUM_KPTS; w++) { kpoint_t* kpt_R = wf_R->kpts[w%NUM_KPTS]; get_aug_freqs(kpt_R, w/NUM_KPTS, sites_N_R, num_N_R, wf_R->G_bounds, wf_R->lattice, wf_R->reclattice, max_num_indices, wf_R->fftg); } free_real_proj_site_list(sites_N_R, num_N_R); } max_num_indices = 0; printf("PART 1 DONE RECIP\n"); if (num_N_S > 0) { real_proj_site_t* sites_N_S = smooth_pw_values(num_N_S, N_S, labels_S, coords_S, wf_S->lattice, wf_S->reclattice, wf_S->pps, wf_S->fftg); NUM_BANDS = wf_S->nband; 
for (int s = 0; s < num_N_S; s++) { if (sites_N_S[s].num_indices > max_num_indices) { max_num_indices = sites_N_S[s].num_indices; } } #if defined(_OPENMP) omp_set_num_threads(omp_get_max_threads()); #endif #pragma omp parallel for for (int w = 0; w < NUM_BANDS * NUM_KPTS; w++) { kpoint_t* kpt_S = wf_S->kpts[w%NUM_KPTS]; get_aug_freqs(kpt_S, w/NUM_KPTS, sites_N_S, num_N_S, wf_S->G_bounds, wf_S->lattice, wf_S->reclattice, max_num_indices, wf_S->fftg); } free_real_proj_site_list(sites_N_S, num_N_S); } printf("PART 2 DONE RECIP\n"); double* dcoords = NULL; if (num_N_RS > 0) { dcoords = (double*) malloc(3 * num_N_RS * sizeof(double)); CHECK_ALLOCATION(dcoords); } #if defined(_OPENMP) omp_set_num_threads(omp_get_max_threads()); #endif #pragma omp parallel for for (int i = 0; i < num_N_RS; i++) { double R = 0; int l1, l2; int s1 = N_RS_R[i]; int s2 = N_RS_S[i]; ppot_t pp1 = wf_R->pps[labels_R[s1]]; ppot_t pp2 = wf_S->pps[labels_S[s2]]; // CALCULATE THE DIFF COORD HERE, PASS TO offsite_wave_overlap AND SAVE IT FOR USE IN compensation_terms overlaps[i] = (double complex*) calloc(pp1.total_projs * pp2.total_projs, sizeof(double complex)); CHECK_ALLOCATION(overlaps[i]); double* coord1 = coords_R + 3 * s1; double* coord2 = coords_S + 3 * s2; min_cart_path(coord2, coord1, wf_R->lattice, dcoords + 3*i, &R); int tj = 0; for (int j = 0; j < pp1.num_projs; j++) { l1 = pp1.funcs[j].l; for (int m1 = -l1; m1 <= l1; m1++) { int tk = 0; for (int k = 0; k < pp2.num_projs; k++) { l2 = pp2.funcs[k].l; for (int m2 = -l2; m2 <= l2; m2++) { overlaps[i][tj*pp2.total_projs+tk] = conj( reciprocal_offsite_wave_overlap(dcoords + 3*i, pp1.kwave_grid, pp1.funcs[j].kwave, pp1.funcs[j].kwave_spline, pp1.wave_gridsize, pp2.kwave_grid, pp2.funcs[k].kwave, pp2.funcs[k].kwave_spline, pp2.wave_gridsize, wf_R->lattice, l1, m1, l2, m2) ); tk++; } } tj++; } } } wf_S->overlaps = overlaps; wf_S->dcoords = dcoords; wf_S->num_aug_overlap_sites = num_N_RS; wf_R->num_aug_overlap_sites = num_N_RS; printf("PART 3 
DONE RECIP\nFINISHED OVERLAP SETUP\n"); } void compensation_terms(double complex* overlap, int BAND_NUM, pswf_t* wf_S, pswf_t* wf_R, int num_M, int num_N_R, int num_N_S, int num_N_RS, int* M_R, int* M_S, int* N_R, int* N_S, int* N_RS_R, int* N_RS_S, int* proj_labels, double* proj_coords, int* ref_labels, double* ref_coords, int* fft_grid, int flip_spin) { setbuf(stdout, NULL); int NUM_KPTS = wf_R->nwk * wf_R->nspin; int NUM_BANDS = wf_R->nband; //double* overlap = (double*) calloc(2 * NUM_KPTS * NUM_BANDS, sizeof(double)); CHECK_ALLOCATION(overlap); double complex** N_RS_overlaps = wf_S->overlaps; #if defined(_OPENMP) omp_set_num_threads(omp_get_max_threads()); #endif #pragma omp parallel for for (int w = 0; w < NUM_BANDS * NUM_KPTS; w++) { int ni = 0, nj = 0; int kpt_ind_R = w%NUM_KPTS; if (wf_R->nspin == 2 && flip_spin) { if (kpt_ind_R < wf_R->nwk) { kpt_ind_R += wf_R->nwk; } else { kpt_ind_R -= wf_R->nwk; } } kpoint_t* kpt_R = wf_R->kpts[kpt_ind_R]; kpoint_t* kpt_S = wf_S->kpts[w%NUM_KPTS]; band_t* band_R = kpt_R->bands[w/NUM_KPTS]; band_t* band_S = kpt_S->bands[BAND_NUM]; double complex temp = 0 + 0 * I; for (int s = 0; s < num_M; s++) { ppot_t pp = wf_R->pps[ref_labels[M_R[s]]]; int s1 = M_R[s]; int s2 = M_S[s]; projection_t pron = band_R->projections[s1]; projection_t ppron = band_S->projections[s2]; for (int i = 0; i < pron.total_projs; i++) { for (int j = 0; j < ppron.total_projs; j++) { if (pron.ls[i] == ppron.ls[j] && pron.ms[i] == ppron.ms[j]) { nj = ppron.ns[j]; ni = pron.ns[i]; temp += conj(pron.overlaps[i]) * (pp.aepw_overlap_matrix[pp.num_projs*ni+nj] - pp.pspw_overlap_matrix[pp.num_projs*ni+nj]) * ppron.overlaps[j]; } } } } overlap[w] += temp; //overlap[2*w] = creal(temp); //overlap[2*w+1]= cimag(temp); //printf("temp 1 %lf %lf\n", creal(temp), cimag(temp)); temp = 0 + 0 * I; for (int s = 0; s < num_N_R; s++) { int site_num = N_R[s]; projection_t pron = band_R->projections[site_num]; projection_t ppron = band_S->wave_projections[s]; for (int i = 0; 
i < pron.total_projs; i++) { temp += ppron.overlaps[i] * conj(pron.overlaps[i]); } } overlap[w] += temp; //overlap[2*w] += creal(temp); //overlap[2*w+1]+= cimag(temp); //printf("temp 2 %lf %lf\n", creal(temp), cimag(temp)); temp = 0 + 0 * I; for (int s = 0; s < num_N_S; s++) { int site_num = N_S[s]; projection_t pron = band_R->wave_projections[s]; projection_t ppron = band_S->projections[site_num]; for (int i = 0; i < ppron.total_projs; i++) { temp += conj(pron.overlaps[i]) * ppron.overlaps[i]; } } overlap[w] += temp; //overlap[2*w] += creal(temp); //overlap[2*w+1]+= cimag(temp); //printf("temp 3 %d %d %d %lf %lf\n", kpt_S->num_waves, kpt_R->num_waves, w%NUM_KPTS, creal(temp), cimag(temp)); temp = 0 + 0 * I; for (int s = 0; s < num_N_RS; s++) { int site_num1 = N_RS_R[s]; int site_num2 = N_RS_S[s]; projection_t pron = band_R->projections[site_num1]; projection_t ppron = band_S->projections[site_num2]; for (int i = 0; i < pron.total_projs; i++) { for (int j = 0; j < ppron.total_projs; j++) { temp += conj(pron.overlaps[i]) * (N_RS_overlaps[s][i*ppron.total_projs+j]) * ppron.overlaps[j] * cexp(2*I*PI * dot(kpt_R->k, wf_S->dcoords + 3*s)); } } } overlap[w] += temp; //overlap[2*w] += creal(temp); //overlap[2*w+1]+= cimag(temp); } } void compensation_terms_recip(double complex* overlap, int BAND_NUM, pswf_t* wf_S, pswf_t* wf_R, int num_M, int num_N_R, int num_N_S, int num_N_RS, int* M_R, int* M_S, int* N_R, int* N_S, int* N_RS_R, int* N_RS_S, int* proj_labels, double* proj_coords, int* ref_labels, double* ref_coords, int* fft_grid, int flip_spin) { setbuf(stdout, NULL); int NUM_KPTS = wf_R->nwk * wf_R->nspin; int NUM_BANDS = wf_R->nband; //double* overlap = (double*) calloc(2 * NUM_KPTS * NUM_BANDS, sizeof(double)); CHECK_ALLOCATION(overlap); double complex** N_RS_overlaps = wf_S->overlaps; #if defined(_OPENMP) omp_set_num_threads(omp_get_max_threads()); #endif #pragma omp parallel for for (int w = 0; w < NUM_BANDS * NUM_KPTS; w++) { int ni = 0, nj = 0; int kpt_ind_R = 
w%NUM_KPTS; if (wf_R->nspin == 2 && flip_spin) { if (kpt_ind_R < wf_R->nwk) { kpt_ind_R += wf_R->nwk; } else { kpt_ind_R -= wf_R->nwk; } } kpoint_t* kpt_R = wf_R->kpts[kpt_ind_R]; kpoint_t* kpt_S = wf_S->kpts[w%NUM_KPTS]; band_t* band_R = kpt_R->bands[w/NUM_KPTS]; band_t* band_S = kpt_S->bands[BAND_NUM]; float complex* C1s = NULL; float complex* C2s = NULL; float complex curr_overlap; int num_waves; if (band_R->CAs != NULL) { curr_overlap = 0; C1s = band_S->Cs; C2s = band_R->CAs; num_waves = kpt_R->num_waves; cblas_cdotc_sub(num_waves, C2s, 1, C1s, 1, &curr_overlap); overlap[w] += (double complex) curr_overlap; } if (band_S->CAs != NULL) { curr_overlap = 0; C1s = band_S->CAs; C2s = band_R->Cs; num_waves = kpt_R->num_waves; cblas_cdotc_sub(num_waves, C2s, 1, C1s, 1, &curr_overlap); overlap[w] += (double complex) curr_overlap; } //printf("part 1 %d %d %d %lf %lf %f %f\n", BAND_NUM, w/NUM_KPTS, w%NUM_KPTS, // creal(overlap[w]), cimag(overlap[w]), // creal(curr_overlap), cimag(curr_overlap)); double complex temp = 0 + 0 * I; for (int s = 0; s < num_M; s++) { ppot_t pp = wf_R->pps[ref_labels[M_R[s]]]; int s1 = M_R[s]; int s2 = M_S[s]; projection_t pron = band_R->projections[s1]; projection_t ppron = band_S->projections[s2]; for (int i = 0; i < pron.total_projs; i++) { for (int j = 0; j < ppron.total_projs; j++) { if (pron.ls[i] == ppron.ls[j] && pron.ms[i] == ppron.ms[j]) { nj = ppron.ns[j]; ni = pron.ns[i]; temp += conj(pron.overlaps[i]) * (pp.aepw_overlap_matrix[pp.num_projs*ni+nj] - pp.pspw_overlap_matrix[pp.num_projs*ni+nj]) * ppron.overlaps[j]; } } } } overlap[w] += temp; //printf("part 2 %lf %lf\n", creal(overlap[w]), cimag(overlap[w])); temp = 0 + 0 * I; for (int s = 0; s < num_N_RS; s++) { int site_num1 = N_RS_R[s]; int site_num2 = N_RS_S[s]; projection_t pron = band_R->projections[site_num1]; projection_t ppron = band_S->projections[site_num2]; for (int i = 0; i < pron.total_projs; i++) { for (int j = 0; j < ppron.total_projs; j++) { temp += 
conj(pron.overlaps[i]) * (N_RS_overlaps[s][i*ppron.total_projs+j]) * ppron.overlaps[j] * cexp(2*I*PI * dot(kpt_R->k, wf_S->dcoords + 3*s)); } } } overlap[w] += temp; //printf("part 3 %lf %lf\n", creal(overlap[w]), cimag(overlap[w])); //overlap[2*w] += creal(temp); //overlap[2*w+1]+= cimag(temp); } }
DRB097-target-teams-distribute-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
#include <stdio.h>

/* Bounded min so the tail chunk (i2+256 > len) is not over-read. */
#define min(x, y) (((x) < (y)) ? (x) : (y))

/* use of omp target + teams + distribute + parallel for */
/*
 * DataRaceBench kernel: dot product of a and b offloaded to a device via
 * target + teams + distribute + parallel for, chunked in blocks of 256
 * iterations per team, followed by a host reference computation.
 * NOTE(review): filename suggests this is the "orig-no" (race-free)
 * variant — both sums are protected by reduction(+:...) clauses, so the
 * printed sum and sum2 should agree.
 */
int main(int argc, char* argv[])
{
  int i, i2;
  int len = 2560;
  double sum =0.0, sum2=0.0;
  /* VLAs of 2560 doubles each (~20 KB on the stack). */
  double a[len], b[len];
  /*Initialize with some values*/
  for (i=0; i<len; i++) {
    a[i]= ((double)i)/2.0;
    b[i]= ((double)i)/3.0;
  }

  /* Device computation: arrays mapped in, sum mapped both ways; the
     reduction on sum spans the teams and the nested parallel for. */
#pragma omp target map(to: a[0:len], b[0:len]) map(tofrom: sum)
#pragma omp teams num_teams(10) thread_limit(256) reduction (+:sum)
#pragma omp distribute
  for (i2=0; i2< len; i2+=256)            /* one 256-wide chunk per team */
#pragma omp parallel for reduction (+:sum)
    for (i=i2;i< min(i2+256, len); i++)
      sum += a[i]*b[i];

  /* CPU reference computation */
#pragma omp parallel for reduction (+:sum2)
  for (i=0;i< len; i++)
    sum2 += a[i]*b[i];

  printf ("sum=%f sum2=%f\n", sum, sum2);
  return 0;
}
test.c
#include <stdio.h> #include <omp.h> #include "../utilities/check.h" #include "../utilities/utilities.h" #define TRIALS (1) #define N (1024*3) #define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i+1;}) #define ZERO(X) ZERO_ARRAY(N, X) #define DUMP_SUCCESS9() { \ DUMP_SUCCESS(gpu_threads-max_threads); \ DUMP_SUCCESS(gpu_threads-max_threads); \ DUMP_SUCCESS(gpu_threads-max_threads); \ DUMP_SUCCESS(gpu_threads-max_threads); \ DUMP_SUCCESS(gpu_threads-max_threads); \ DUMP_SUCCESS(gpu_threads-max_threads); \ DUMP_SUCCESS(gpu_threads-max_threads); \ DUMP_SUCCESS(gpu_threads-max_threads); \ DUMP_SUCCESS(gpu_threads-max_threads); \ } // // FIXME: // Add support for 'shared', 'lastprivate' // int main(void) { check_offloading(); double A[N], B[N], C[N], D[N], E[N]; double S[N]; double p[2]; int cpuExec = 0; #pragma omp target map(tofrom: cpuExec) { cpuExec = omp_is_initial_device(); } int gpu_threads = 224; int cpu_threads = 32; int max_threads = cpuExec ? cpu_threads : gpu_threads; INIT(); // // Test: proc_bind clause // #undef TARGET_PARALLEL_FOR_CLAUSES #define TARGET_PARALLEL_FOR_CLAUSES proc_bind(master) #include "tpf_defines.h" for (int t = 0; t <= max_threads; t++) { int threads[1]; threads[0] = t; TARGET_PARALLEL_FOR1( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR2( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR3( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i 
< N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR4( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR5( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR6( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR7( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR8( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR9( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) } DUMP_SUCCESS9() #undef TARGET_PARALLEL_FOR_CLAUSES #define TARGET_PARALLEL_FOR_CLAUSES proc_bind(close) #include "tpf_defines.h" for (int t = 0; t <= max_threads; t++) { int 
threads[1]; threads[0] = t; TARGET_PARALLEL_FOR1( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR2( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR3( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR4( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR5( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR6( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR7( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } 
S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR8( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR9( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) } DUMP_SUCCESS9() #undef TARGET_PARALLEL_FOR_CLAUSES #define TARGET_PARALLEL_FOR_CLAUSES proc_bind(spread) #include "tpf_defines.h" for (int t = 0; t <= max_threads; t++) { int threads[1]; threads[0] = t; TARGET_PARALLEL_FOR1( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR2( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR3( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR4( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) 
TARGET_PARALLEL_FOR5( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR6( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR7( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR8( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR9( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) } DUMP_SUCCESS9() // // Test: private, shared clauses on omp target parallel for. // #undef TARGET_PARALLEL_FOR_CLAUSES #define TARGET_PARALLEL_FOR_CLAUSES private(p,q) shared(A,B,C,D,E) #include "tpf_defines.h" // FIXME: shared(a) where 'a' is an implicitly mapped scalar does not work. // FIXME: shared(A) private(A) does not generate correct results. 
for (int t = 0; t <= max_threads; t++) { int threads[1]; threads[0] = t; TARGET_PARALLEL_FOR1( double p = 2; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ p = C[i] + D[i]; \ q = D[i] + E[i]; \ A[i] += p; \ B[i] += q; \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + N/2*(N+1))) TARGET_PARALLEL_FOR2( double p = 2; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ p = C[i] + D[i]; \ q = D[i] + E[i]; \ A[i] += p; \ B[i] += q; \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + N/2*(N+1))) TARGET_PARALLEL_FOR3( double p = 2; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ p = C[i] + D[i]; \ q = D[i] + E[i]; \ A[i] += p; \ B[i] += q; \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + N/2*(N+1))) TARGET_PARALLEL_FOR4( double p = 2; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ p = C[i] + D[i]; \ q = D[i] + E[i]; \ A[i] += p; \ B[i] += q; \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + N/2*(N+1))) TARGET_PARALLEL_FOR5( double p = 2; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ p = C[i] + D[i]; \ q = D[i] + E[i]; \ A[i] += p; \ B[i] += q; \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + N/2*(N+1))) TARGET_PARALLEL_FOR6( double p = 2; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ p = C[i] + D[i]; \ q = D[i] 
+ E[i]; \ A[i] += p; \ B[i] += q; \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + N/2*(N+1))) TARGET_PARALLEL_FOR7( double p = 2; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ p = C[i] + D[i]; \ q = D[i] + E[i]; \ A[i] += p; \ B[i] += q; \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + N/2*(N+1))) TARGET_PARALLEL_FOR8( double p = 2; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ p = C[i] + D[i]; \ q = D[i] + E[i]; \ A[i] += p; \ B[i] += q; \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + N/2*(N+1))) TARGET_PARALLEL_FOR9( double p = 2; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ p = C[i] + D[i]; \ q = D[i] + E[i]; \ A[i] += p; \ B[i] += q; \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + N/2*(N+1))) } DUMP_SUCCESS9() // // Test: firstprivate clause on omp target parallel for. 
// #undef TARGET_PARALLEL_FOR_CLAUSES #define TARGET_PARALLEL_FOR_CLAUSES firstprivate(p,q) #include "tpf_defines.h" for (int t = 0; t <= max_threads; t++) { int threads[1]; threads[0] = t; TARGET_PARALLEL_FOR1( double p = -4; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i] + p; \ B[i] += D[i] + E[i] + q; \ if (i == N-1) { \ p += 6; \ q += 9; \ } \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR2( double p = -4; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i] + p; \ B[i] += D[i] + E[i] + q; \ if (i == N-1) { \ p += 6; \ q += 9; \ } \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR3( double p = -4; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i] + p; \ B[i] += D[i] + E[i] + q; \ if (i == N-1) { \ p += 6; \ q += 9; \ } \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR4( double p = -4; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i] + p; \ B[i] += D[i] + E[i] + q; \ if (i == N-1) { \ p += 6; \ q += 9; \ } \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR5( double p = -4; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i] + p; \ B[i] += D[i] + E[i] + q; \ if (i == N-1) { \ p += 6; \ q += 9; \ } \ }, { double tmp = p + q; for (int i = 0; i < N; 
i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR6( double p = -4; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i] + p; \ B[i] += D[i] + E[i] + q; \ if (i == N-1) { \ p += 6; \ q += 9; \ } \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR7( double p = -4; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i] + p; \ B[i] += D[i] + E[i] + q; \ if (i == N-1) { \ p += 6; \ q += 9; \ } \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR8( double p = -4; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i] + p; \ B[i] += D[i] + E[i] + q; \ if (i == N-1) { \ p += 6; \ q += 9; \ } \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR9( double p = -4; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i] + p; \ B[i] += D[i] + E[i] + q; \ if (i == N-1) { \ p += 6; \ q += 9; \ } \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1))) } DUMP_SUCCESS9() #if 0 FIXME // // Test: lastprivate clause on omp target parallel for. // #undef TARGET_PARALLEL_FOR_CLAUSES #define TARGET_PARALLEL_FOR_CLAUSES lastprivate(q) #include "tpf_defines.h" // FIXME: modify to t=1 and in tpf_defines.h to use host after bug fix. // FIXME: variable is not private. 
for (int t = 2; t <= max_threads; t++) { int threads[1]; threads[0] = t; TARGET_PARALLEL_FOR1( double p[1]; \ double q[1]; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ p[0] = C[i] + D[i]; \ q[0] = D[i] + E[i]; \ A[i] = p[0]; \ B[i] = q[0]; \ }, { double tmp = p[0] + q[0]; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N+1+ N/2*(N+1))) } FIXME: private of non-scalar does not work. // // Test: private clause on omp parallel for. // #undef PARALLEL_FOR_CLAUSES #define PARALLEL_FOR_CLAUSES private(p) #include "tpf_defines.h" for (int t = 0; t <= 224; t++) { int threads[1]; threads[0] = t; PARALLEL_FOR( p[0] = 2; p[1] = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ p[0] = C[i] + D[i]; \ p[1] = D[i] + E[i]; \ A[i] += p[0]; \ B[i] += p[1]; \ } , { double tmp = p[0] + p[1]; for (int i = 0; i < N; i++) { tmp += A[i] + B[i]; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + SUMS * (N/2*(N+1)))) } FIXME: private of non-scalar does not work. // // Test: firstprivate clause on omp parallel for. // #undef PARALLEL_FOR_CLAUSES #define PARALLEL_FOR_CLAUSES firstprivate(p) #include "tpf_defines.h" for (int t = 0; t <= 224; t++) { int threads[1]; threads[0] = t; PARALLEL_FOR( p[0] = -4; p[1] = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i] + p[0]; \ B[i] += D[i] + E[i] + p[1]; \ if (i == N-1) { \ p[0] += 6; \ p[1] += 9; \ } \ } , { double tmp = p[0] + p[1]; for (int i = 0; i < N; i++) { tmp += A[i] + B[i]; } S[0] += tmp; }, VERIFY(0, 1, S[0], SUMS * (N/2*(N+1)))) } #endif // // Test: collapse clause on omp target parallel for. 
// #undef TARGET_PARALLEL_FOR_CLAUSES #define TARGET_PARALLEL_FOR_CLAUSES collapse(2) #include "tpf_defines.h" for (int t = 0; t <= max_threads; t++) { int threads[1]; threads[0] = t; TARGET_PARALLEL_FOR1( S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < 1024; i++) { \ for (int j = 0; j < 3; j++) { \ A[i*3+j] += C[i*3+j] + D[i*3+j]; \ B[i*3+j] += D[i*3+j] + E[i*3+j]; \ } \ } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (N/2*(N+1)))) TARGET_PARALLEL_FOR2( S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < 1024; i++) { \ for (int j = 0; j < 3; j++) { \ A[i*3+j] += C[i*3+j] + D[i*3+j]; \ B[i*3+j] += D[i*3+j] + E[i*3+j]; \ } \ } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (N/2*(N+1)))) TARGET_PARALLEL_FOR3( S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < 1024; i++) { \ for (int j = 0; j < 3; j++) { \ A[i*3+j] += C[i*3+j] + D[i*3+j]; \ B[i*3+j] += D[i*3+j] + E[i*3+j]; \ } \ } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (N/2*(N+1)))) TARGET_PARALLEL_FOR4( S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < 1024; i++) { \ for (int j = 0; j < 3; j++) { \ A[i*3+j] += C[i*3+j] + D[i*3+j]; \ B[i*3+j] += D[i*3+j] + E[i*3+j]; \ } \ } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (N/2*(N+1)))) TARGET_PARALLEL_FOR5( S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < 1024; i++) { \ for (int j = 0; j < 3; j++) { \ A[i*3+j] += C[i*3+j] + D[i*3+j]; \ B[i*3+j] += D[i*3+j] + E[i*3+j]; \ } \ } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (N/2*(N+1)))) TARGET_PARALLEL_FOR6( S[0] = 
0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < 1024; i++) { \ for (int j = 0; j < 3; j++) { \ A[i*3+j] += C[i*3+j] + D[i*3+j]; \ B[i*3+j] += D[i*3+j] + E[i*3+j]; \ } \ } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (N/2*(N+1)))) TARGET_PARALLEL_FOR7( S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < 1024; i++) { \ for (int j = 0; j < 3; j++) { \ A[i*3+j] += C[i*3+j] + D[i*3+j]; \ B[i*3+j] += D[i*3+j] + E[i*3+j]; \ } \ } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (N/2*(N+1)))) TARGET_PARALLEL_FOR8( S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < 1024; i++) { \ for (int j = 0; j < 3; j++) { \ A[i*3+j] += C[i*3+j] + D[i*3+j]; \ B[i*3+j] += D[i*3+j] + E[i*3+j]; \ } \ } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (N/2*(N+1)))) TARGET_PARALLEL_FOR9( S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < 1024; i++) { \ for (int j = 0; j < 3; j++) { \ A[i*3+j] += C[i*3+j] + D[i*3+j]; \ B[i*3+j] += D[i*3+j] + E[i*3+j]; \ } \ } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (N/2*(N+1)))) } DUMP_SUCCESS9() // // Test: ordered clause on omp target parallel for. 
// #undef TARGET_PARALLEL_FOR_CLAUSES #define TARGET_PARALLEL_FOR_CLAUSES ordered #include "tpf_defines.h" for (int t = 0; t <= max_threads; t++) { int threads[1]; threads[0] = t; TARGET_PARALLEL_FOR1( S[0] = 0; \ , for (int i = 0; i < N; i++) { \ _Pragma("omp ordered") \ S[0] += C[i] + D[i]; \ } , { }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR2( S[0] = 0; \ , for (int i = 0; i < N; i++) { \ _Pragma("omp ordered") \ S[0] += C[i] + D[i]; \ } , { }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR3( S[0] = 0; \ , for (int i = 0; i < N; i++) { \ _Pragma("omp ordered") \ S[0] += C[i] + D[i]; \ } , { }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR4( S[0] = 0; \ , for (int i = 0; i < N; i++) { \ _Pragma("omp ordered") \ S[0] += C[i] + D[i]; \ } , { }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR5( S[0] = 0; \ , for (int i = 0; i < N; i++) { \ _Pragma("omp ordered") \ S[0] += C[i] + D[i]; \ } , { }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR6( S[0] = 0; \ , for (int i = 0; i < N; i++) { \ _Pragma("omp ordered") \ S[0] += C[i] + D[i]; \ } , { }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR7( S[0] = 0; \ , for (int i = 0; i < N; i++) { \ _Pragma("omp ordered") \ S[0] += C[i] + D[i]; \ } , { }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR8( S[0] = 0; \ , for (int i = 0; i < N; i++) { \ _Pragma("omp ordered") \ S[0] += C[i] + D[i]; \ } , { }, VERIFY(0, 1, S[0], N/2*(N+1))) TARGET_PARALLEL_FOR9( S[0] = 0; \ , for (int i = 0; i < N; i++) { \ _Pragma("omp ordered") \ S[0] += C[i] + D[i]; \ } , { }, VERIFY(0, 1, S[0], N/2*(N+1))) } DUMP_SUCCESS9() // // Test: Ensure coalesced scheduling on GPU. 
// if (cpuExec == 0) { #undef TARGET_PARALLEL_FOR_CLAUSES #define TARGET_PARALLEL_FOR_CLAUSES #include "tpf_defines.h" int threads[1]; threads[0] = 32; TARGET_PARALLEL_FOR1( S[0] = 0; \ for (int i = 0; i < 96; i++) { \ A[i] = 0; \ } \ , for (int i = 0; i < 96; i++) { \ A[i] += i - omp_get_thread_num(); \ } , { double tmp = 0; for (int i = 0; i < 96; i++) { tmp += A[i]; } S[0] = tmp; }, VERIFY(0, 1, S[0], (32*32 + 64*32) )) TARGET_PARALLEL_FOR2( S[0] = 0; \ for (int i = 0; i < 96; i++) { \ A[i] = 0; \ } \ , for (int i = 0; i < 96; i++) { \ A[i] += i - omp_get_thread_num(); \ } , { double tmp = 0; for (int i = 0; i < 96; i++) { tmp += A[i]; } S[0] = tmp; }, VERIFY(0, 1, S[0], (32*32 + 64*32) )) TARGET_PARALLEL_FOR7( S[0] = 0; \ for (int i = 0; i < 96; i++) { \ A[i] = 0; \ } \ , for (int i = 0; i < 96; i++) { \ A[i] += i - omp_get_thread_num(); \ } , { double tmp = 0; for (int i = 0; i < 96; i++) { tmp += A[i]; } S[0] = tmp; }, VERIFY(0, 1, S[0], (32*32 + 64*32) )) } else { DUMP_SUCCESS(3); } return 0; }
mash.h
// Gao Wang (c) 2017-2020 wang.gao@columbia.edu #ifndef _MASH_H #define _MASH_H #include <cmath> #include <armadillo> #include <iostream> #ifdef _OPENMP # include <omp.h> #endif using std::log; using std::exp; using std::sqrt; using arma::uword; using arma::vec; using arma::uvec; using arma::rowvec; using arma::colvec; using arma::mat; using arma::cube; using arma::datum; using arma::zeros; using arma::eye; using arma::size; using arma::accu; using arma::sum; using arma::max; using arma::abs; using arma::sqrt; using arma::pow; using arma::exp; using arma::log; using arma::trace; using arma::trans; using arma::find; using arma::inv; using arma::trimatu; using arma::chol; using arma::dot; using arma::intersect; using arma::find; // CONSTANTS // --------- const double LOG_2PI = log(2.0 * M_PI); const double INV_SQRT_2PI = 1.0 / sqrt(2.0 * M_PI); const double LOG_INV_SQRT_2PI = log(INV_SQRT_2PI); // INLINE FUNCTION DEFINITONS // -------------------------- inline vec dnorm(const vec & x, const vec & mu, const vec & sigma2, bool logd = false) { vec res = LOG_INV_SQRT_2PI - log(sqrt(sigma2)) - pow(x - mu, 2.0) / (2.0 * sigma2); if (logd) return res; else return exp(res); } inline vec dmvnorm_mat(const mat & x, const vec & mean, const mat & sigma, bool logd = false, bool inversed = false) { double xdim = static_cast<double>(x.n_rows); vec out(x.n_cols); mat rooti; // we have previously computed rooti // in R eg rooti <- backsolve(chol(sigma), diag(ncol(x))) if (inversed) { rooti = sigma; } else { try { rooti = trans(inv(trimatu(chol(sigma)))); } catch (const std::runtime_error & error) { if (logd) out.fill(-datum::inf); else out.fill(0.0); for (uword i = 0; i < x.n_cols; ++i) if (accu(abs(x.col(i) - mean)) < 1e-6) out.at(i) = datum::inf; return out; } } double rootisum = sum(log(rooti.diag())); double constants = -(xdim / 2.0) * LOG_2PI; for (unsigned i = 0; i < x.n_cols; i++) { vec z = rooti * (x.col(i) - mean); out.at(i) = constants - 0.5 * sum(z % z) + rootisum; } if 
(logd == false) { out = exp(out); } return out; } inline double dmvnorm(const vec & x, const vec & mean, const mat & sigma, bool logd = false, bool inversed = false) { mat rooti; if (inversed) { rooti = sigma; } else { try { rooti = trans(inv(trimatu(chol(sigma)))); } catch (const std::runtime_error & error) { double diff = accu(abs(x - mean)); if (logd) return (diff < 1e-6) ? datum::inf : -datum::inf; else return (diff < 1e-6) ? datum::inf : 0.0; } } double rootisum = sum(log(rooti.diag())); double constants = -(static_cast<double>(x.n_elem) / 2.0) * LOG_2PI; vec z = rooti * (x - mean); double out = constants - 0.5 * sum(z % z) + rootisum; if (logd == false) { out = exp(out); } return out; } template <class T, class U> inline T pnorm(const U & x, const T & m, const T & s, bool logd = false, bool lower_tail = true) { // see `normalCDF` function at: // http://en.cppreference.com/w/cpp/numeric/math/erfc T res = 0.5 * arma::erfc((x - m) / s * M_SQRT1_2); // FIXME: unlike R::pnorm(0,0,0) = 1 and R::pnorm(-1,0,0) = 0, here it generates NaN // I manually fix it below. 
// "s == 0" check is not good enough to ensure that res doesn't have NaN due to division by zero uvec nan = arma::find_nonfinite(0 / s); if (nan.n_elem > 0) { res.elem(intersect(find(x >= m), nan)).ones(); res.elem(intersect(find(x < m), nan)).zeros(); } if (!lower_tail & !logd) { return 1.0 - res; } else if (lower_tail & !logd) { return res; } else if (!lower_tail & logd) { return log(1.0 - res); } else { // (lower_tail & logd) return log(res); } } // a quicker way to compute diag(s) %*% V %*% diag(s) inline mat get_cov(const vec & s, const mat & V, const mat & L) { if (L.is_empty()) { /* return arma::diagmat(s) * V * arma::diagmat(s); */ return (V.each_col() % s).each_row() % s.t(); } else { mat svs = (V.each_col() % s).each_row() % s.t(); return L * svs * L.t(); } } inline mat get_cov(const vec & s, const mat & V) { /* return arma::diagmat(s) * V * arma::diagmat(s); */ return (V.each_col() % s).each_row() % s.t(); } // @title posterior_cov // @param Vinv R x R inverse covariance matrix for the likelihood // @param U R x R prior covariance matrix // @return R x R posterior covariance matrix // @description If bhat is N(b,V) and b is N(0,U) then b|bhat N(mu1,U1). This function returns U1. inline mat get_posterior_cov(const mat & Vinv, const mat & U) { // U %*% solve(Vinv %*% U + diag(nrow(U))) mat S = Vinv * U; S.diag() += 1.0; return U * S.i(); } // @title posterior_mean // @param bhat R vector of observations // @param Vinv R x R inverse covariance matrix for the likelihood // @param U1 R x R posterior covariance matrix, computed using posterior_cov // @return R vector of posterior mean // @description If bhat is N(b,V) and b is N(0,U) then b|bhat N(mu1,U1). This function returns mu1. 
// Posterior mean mu1 = U1 %*% Vinv %*% bhat for a single effect
// (see get_posterior_cov above for U1).
inline vec get_posterior_mean(const vec & bhat, const mat & Vinv, const mat & U1)
{
	return U1 * Vinv * bhat;
}

// Matrix analogue of get_posterior_mean: applies U1 * Vinv to every column of
// bhat at once (one column per effect).
inline mat get_posterior_mean_mat(const mat & bhat, const mat & Vinv, const mat & U1)
{
	return U1 * Vinv * bhat;
}

// SE CLASS
// --------
// Container for the standard-error matrices used by PosteriorMASH: the raw
// standard errors (s), an optional original-scale copy (s_orig), and the
// alpha-scaled standard errors (s_alpha; all-ones when no scaling applies).
class SE
{
public:
	SE(){ }

	~SE(){ }

	// Set from explicit sbhat; when sbhat_alpha is empty, an all-ones matrix of
	// the same shape is used (i.e. no alpha scaling).
	void set(const mat & sbhat, const mat & sbhat_alpha)
	{
		s = sbhat;
		if (sbhat_alpha.is_empty()) s_alpha.ones(sbhat.n_rows, sbhat.n_cols);
		else s_alpha = sbhat_alpha;
	}

	// Default: all standard errors are 1. NOTE(review): callers pass (R, J)
	// here even though the parameters are named (J, R) — verify intent.
	void set(int J, int R)
	{
		s.ones(J, R);
		s_alpha.ones(J, R);
	}

	// Optionally record original-scale standard errors; remembers whether an
	// empty matrix was given so get_original() can fall back to s.
	void set_original(const mat & value)
	{
		s_orig = value;
		is_orig_empty = s_orig.is_empty();
	}

	mat get_original() const
	{
		if (is_orig_empty) return (s);
		else return (s_orig);
	}

	mat get() const { return (s_alpha); }

private:
	mat s;        // standard errors as given
	mat s_orig;   // original-scale standard errors (may be empty)
	mat s_alpha;  // alpha-scaled standard errors; all-ones when unused
	// NOTE(review): is_orig_empty is uninitialized until set_original() is
	// called — reading get_original() before that is undefined. Confirm all
	// call sites invoke set_original() first.
	bool is_orig_empty;
};

// FUNCTION DECLARATIONS
// ---------------------
// Workhorse routines implemented elsewhere (out-of-line); the classes below
// forward their members to these.
int mash_compute_posterior(const mat& b_mat, const SE& s_obj, const mat& v_mat,
                           const mat& l_mat, const mat& a_mat, const cube& U_cube,
                           const cube& Vinv_cube, const cube& U0_cube,
                           mat& post_mean, mat& post_var, mat& neg_prob,
                           mat& zero_prob, cube& post_cov,
                           const mat& posterior_weights, const int& report_type);

int mash_compute_posterior_comcov(const mat& b_mat, const SE & s_obj, const mat & v_mat,
                                  const mat & l_mat, const mat & a_mat, const cube & U_cube,
                                  const cube & Vinv_cube, const cube & U0_cube,
                                  mat & post_mean, mat & post_var, mat & neg_prob,
                                  mat & zero_prob, cube & post_cov,
                                  const mat & posterior_weights, const int & report_type);

int mvsermix_compute_posterior(const mat& b_mat, const mat & s_mat, mat & v_mat,
                               cube & U_cube, cube & Vinv_cube, cube & U0_cube,
                               cube & Uinv_cube, mat & post_mean, mat & post_var,
                               mat & neg_prob, mat & zero_prob, cube & post_cov,
                               vec & prior_scalar, const mat & posterior_weights,
                               const mat & posterior_variable_weights);

int mvsermix_compute_posterior_comcov(const mat& b_mat, const mat & s_mat, const mat & v_mat,
                                      const cube & U_cube, const cube & Vinv_cube,
                                      const cube & U0_cube, const cube & Uinv_cube,
                                      mat & post_mean, mat & post_var, mat & neg_prob,
                                      mat & zero_prob, cube & post_cov, vec & prior_scalar,
                                      const mat & posterior_weights,
                                      const mat & posterior_variable_weights);

// POSTERIORMASH CLASS
// -------------------
// @param b_mat R by J
// @param s_mat R by J
// @param s_orig_mat R by J
// @param s_alpha_mat R by J
// @param v_mat R by R
// @param l_mat R by R for the common baseline application (@Yuxin Zou)
// @param a_mat Q by R for the common baseline application (@Yuxin Zou)
// @param U_cube list of prior covariance matrices, for each mixture component P by R by R
class PosteriorMASH
{
public:
	// Copies the inputs, builds the SE object (defaulting to unit standard
	// errors when s_mat is empty), and zero-initializes all output arrays.
	// When a_mat is non-empty the output dimension R becomes a_mat.n_rows.
	PosteriorMASH(const mat & b_mat, const mat & s_mat, const mat & s_alpha_mat,
	              const mat & s_orig_mat, const mat & v_mat, const mat & l_mat,
	              const mat & a_mat, const cube & U_cube) :
		b_mat(b_mat), v_mat(v_mat), l_mat(l_mat), a_mat(a_mat), U_cube(U_cube)
	{
		int J = b_mat.n_cols, R = b_mat.n_rows;

		if (s_mat.is_empty()) s_obj.set(R, J);
		else s_obj.set(s_mat, s_alpha_mat);
		s_obj.set_original(s_orig_mat);
		if (!a_mat.is_empty()) {
			R = a_mat.n_rows;
		}
		post_mean.set_size(R, J);
		post_var.set_size(R, J);
		post_cov.set_size(R, R, J);
		neg_prob.set_size(R, J);
		zero_prob.set_size(R, J);
		post_mean.zeros();
		post_var.zeros();
		post_cov.zeros();
		neg_prob.zeros();
		zero_prob.zeros();
#ifdef _OPENMP
		omp_set_num_threads(1);
#endif
	}

	~PosteriorMASH(){ }

	// @title Compute posterior matrices
	// @param posterior_weights P X J matrix, the posterior probabilities of each mixture component for each effect
	// @param report_type an integer: 1 for posterior mean only, 2 for posterior second moment, 3 for default mash output, 4 for additionally posterior covariance
	int compute_posterior(const mat & posterior_weights, const int & report_type)
	{
		return mash_compute_posterior(b_mat, s_obj, v_mat, l_mat, a_mat, U_cube,
		                              Vinv_cube, U0_cube, post_mean, post_var,
		                              neg_prob, zero_prob, post_cov,
		                              posterior_weights, report_type);
	}

	// @title Compute posterior matrices when covariance SVS is the same for all J conditions
	// @param posterior_weights P X J matrix, the posterior probabilities of each mixture component for each effect
	// @param report_type an integer: 1 for posterior mean only, 2 for posterior second moment, 3 for default mash output, 4 for additionally posterior covariance
	int compute_posterior_comcov(const mat & posterior_weights, const int & report_type)
	{
		return mash_compute_posterior_comcov(b_mat, s_obj, v_mat, l_mat, a_mat,
		                                     U_cube, Vinv_cube, U0_cube, post_mean,
		                                     post_var, neg_prob, zero_prob, post_cov,
		                                     posterior_weights, report_type);
	} // compute_posterior_comcov

	// initializing some optionally precomputed quantities
	int set_vinv(const cube & value)
	{
		Vinv_cube = value;
		return 0;
	}

	int set_U0(const cube & value)
	{
		U0_cube = value;
		return 0;
	}

	int set_thread(const int & value)
	{
#ifdef _OPENMP
		omp_set_num_threads(value);
#endif
		return 0;
	}

	// @return PosteriorMean JxR matrix of posterior means
	// @return PosteriorSD JxR matrix of posterior (marginal) standard deviations
	// @return NegativeProb JxR matrix of posterior (marginal) probability of being negative
	// @return ZeroProb JxR matrix of posterior (marginal) probability of being zero
	mat PosteriorMean(){ return post_mean.t(); }

	mat PosteriorSD(){ return sqrt(post_var).t(); }

	cube PosteriorCov(){ return post_cov; }

	mat NegativeProb(){ return neg_prob.t(); }

	mat ZeroProb(){ return zero_prob.t(); }

private:
	// input
	mat b_mat;
	SE s_obj;
	mat v_mat;
	mat l_mat;
	mat a_mat;
	cube U_cube;
	cube Vinv_cube;
	cube U0_cube;
	// output
	// all R X J mat
	mat post_mean;
	mat post_var;
	mat neg_prob;
	mat zero_prob;
	// J X R X R cube
	cube post_cov;
};

// POSTERIORASH CLASS
// ------------------
// @param b_vec of J
// @param s_vec of J
// @param s_alpha_vec of J
// @param v double
// @param U_vec of P
class PosteriorASH
{
public:
	// Copies inputs; an empty s_alpha defaults to all-ones (no alpha scaling).
	// Output vectors are sized here but only filled by compute_posterior().
	PosteriorASH(const vec & b_vec, const vec & s_vec, const vec & s_alpha,
	             double v, const vec & U_vec) :
		b_vec(b_vec), s_vec(s_vec), v(v), U_vec(U_vec)
	{
		int J = b_vec.n_elem;

		if (s_alpha.is_empty()) s_alpha_vec.ones(J);
		else s_alpha_vec = s_alpha;
		post_mean.set_size(J);
		post_var.set_size(J);
		neg_prob.set_size(J);
		zero_prob.set_size(J);
	}

	~PosteriorASH(){ }

	// @title Compute posterior matrices
	// @description univariate version of PosteriorMASH::compute_posterior(), same logic
	// @param posterior_weights P X J matrix, the posterior probabilities of each mixture component for each effect
	int compute_posterior(const mat & posterior_weights)
	{
		vec vinv   = 1 / (s_vec % s_vec * v);
		unsigned J = b_vec.n_elem;
		unsigned P = U_vec.n_elem;
		vec mean(J, arma::fill::zeros);

		// J X P matrices of per-component posterior quantities
		mat mu1_mat(J, P, arma::fill::zeros);
		mat mu2_mat(J, P, arma::fill::zeros);
		mat zero_mat(J, P, arma::fill::zeros);
		mat neg_mat(J, P, arma::fill::zeros);

		for (uword p = 0; p < P; ++p) {
			// Posterior variance and mean under prior component p.
			vec U1 = U_vec.at(p) / (vinv * U_vec.at(p) + 1.0);
			mu1_mat.col(p) = U1 % vinv % b_vec % s_alpha_vec;
			U1 = U1 % (s_alpha_vec % s_alpha_vec);
			mu2_mat.col(p) = pow(mu1_mat.col(p), 2.0) + U1; // second moment
			vec sigma = sqrt(U1);
			neg_mat.col(p) = pnorm(mu1_mat.col(p), mean, sigma);
			// A zero posterior variance means a point mass at zero.
			for (uword j = 0; j < J; ++j) {
				if (U1.at(j) == 0) {
					zero_mat.at(j, p) = 1.0;
					neg_mat.at(j, p)  = 0.0;
				}
			}
		}
		// compute weighted means of posterior arrays
		for (uword j = 0; j < J; ++j) {
			post_mean.at(j) = dot(mu1_mat.row(j), posterior_weights.col(j));
			post_var.at(j)  = dot(mu2_mat.row(j), posterior_weights.col(j));
			neg_prob.at(j)  = dot(neg_mat.row(j), posterior_weights.col(j));
			zero_prob.at(j) = dot(zero_mat.row(j), posterior_weights.col(j));
		}
		// Convert weighted second moment to variance.
		post_var -= pow(post_mean, 2.0);
		return 0;
	} // compute_posterior

	// @return PosteriorMean J vec of posterior means
	// @return PosteriorSD J vec of posterior (marginal) standard deviations
	// @return NegativeProb J vec of posterior (marginal) probability of being negative
	// @return ZeroProb J vec of posterior (marginal) probability of being zero
	vec PosteriorMean(){ return post_mean; }

	vec PosteriorSD(){ return sqrt(post_var); }

	vec PosteriorCov(){ return post_var; }

	vec NegativeProb(){ return neg_prob; }

	vec ZeroProb(){ return zero_prob; }

private:
	// input of J vecs
	vec b_vec;
	vec s_vec;
	vec s_alpha_vec;
	double v;
	vec U_vec;
	// output of J vecs
	vec post_mean;
	vec post_var;
	vec neg_prob;
	vec zero_prob;
};

// MVSERMIX CLASS
// --------------
// @title Inferences for Multivariate Single Effect Regression with Mixture prior
// @param b_mat R by J
// @param s_mat R by J
// @param v_mat R by R
// @param U_cube list of prior covariance matrices, for each mixture component P by R by R
class MVSERMix
{
public:
	// Copies inputs and zero-initializes every output array; prior_scalar gets
	// one slot per mixture component.
	MVSERMix(const mat & b_mat, const mat & s_mat, const mat & v_mat,
	         const cube & U_cube) :
		b_mat(b_mat), s_mat(s_mat), v_mat(v_mat), U_cube(U_cube)
	{
		int J = b_mat.n_cols, R = b_mat.n_rows;

		post_mean.set_size(R, J);
		post_var.set_size(R, J);
		post_cov.set_size(R, R, J);
		neg_prob.set_size(R, J);
		zero_prob.set_size(R, J);
		post_mean.zeros();
		post_var.zeros();
		post_cov.zeros();
		neg_prob.zeros();
		zero_prob.zeros();
		prior_scalar.set_size(U_cube.n_slices);
#ifdef _OPENMP
		omp_set_num_threads(1);
#endif
	}

	~MVSERMix(){ }

	// @title Compute posterior matrices and EM updates for prior scalar estimate
	// @description Make posterior inferences, and also perform the EM update for prior scalar, for mvSuSiE model.
	// @param posterior_weights P X J matrix, the posterior probabilities of each mixture component for each effect.
	// @param posterior_variable_weights P X J matrix, the posterior inclusion probabilities of each effect in a single-effect model.
	// posterior_variable_weights is only relevant when EM updates for prior scalar is needed.
	int compute_posterior(const mat & posterior_weights,
	                      const mat & posterior_variable_weights)
	{
		return mvsermix_compute_posterior(b_mat, s_mat, v_mat, U_cube, Vinv_cube,
		                                  U0_cube, Uinv_cube, post_mean, post_var,
		                                  neg_prob, zero_prob, post_cov, prior_scalar,
		                                  posterior_weights,
		                                  posterior_variable_weights);
	} // compute_posterior

	// @title Compute posterior matrices when covariance SVS is the same for all J conditions
	// @param posterior_weights P X J matrix, the posterior probabilities of each mixture component for each effect
	int compute_posterior_comcov(const mat & posterior_weights,
	                             const mat & posterior_variable_weights)
	{
		return mvsermix_compute_posterior_comcov(b_mat, s_mat, v_mat, U_cube,
		                                         Vinv_cube, U0_cube, Uinv_cube,
		                                         post_mean, post_var, neg_prob,
		                                         zero_prob, post_cov, prior_scalar,
		                                         posterior_weights,
		                                         posterior_variable_weights);
	} // compute_posterior_comcov

	// initializing some optionally precomputed quantities
	int set_Vinv(const cube & value)
	{
		Vinv_cube = value;
		return 0;
	}

	int set_U0(const cube & value)
	{
		U0_cube = value;
		return 0;
	}

	int set_Uinv(const cube & value)
	{
		Uinv_cube = value;
		return 0;
	}

	int set_thread(const int & value)
	{
#ifdef _OPENMP
		omp_set_num_threads(value);
#endif
		return 0;
	}

	// @return PosteriorMean JxR matrix of posterior means
	// @return PosteriorSD JxR matrix of posterior (marginal) standard deviations
	// @return NegativeProb JxR matrix of posterior (marginal) probability of being negative
	// @return ZeroProb JxR matrix of posterior (marginal) probability of being zero
	mat PosteriorMean(){ return post_mean.t(); }

	mat PosteriorSD(){ return sqrt(post_var).t(); }

	cube PosteriorCov(){ return post_cov; }

	mat NegativeProb(){ return neg_prob.t(); }

	mat ZeroProb(){ return zero_prob.t(); }

	vec PriorScalar(){ return prior_scalar; }

private:
	// input
	mat b_mat;
	mat s_mat;
	mat v_mat;
	cube U_cube;
	cube Vinv_cube;
	cube U0_cube;
	cube Uinv_cube;
	// output
	// all R X J mat
	mat post_mean;
	mat post_var;
	mat neg_prob;
	mat zero_prob;
	// J X R X R cube
	cube post_cov;
	// P vector of scalars
	vec prior_scalar;
};

// Softmax functions: yi = exp(xi) / sum(exp(xj))
inline vec softmax(const vec & x)
{
	// Calculate exp()
	// Subtract the max - this prevents overflow, which happens for x ~ 1000
	vec y = exp(x - max(x));

	// Renormalise
	y /= sum(y);
	return y;
}

// function for "shrinking" the covariance matrix, to get $\hat U_k$.
// Floors every eigenvalue at 1 + eps (eigenvalues already > 1 are kept);
// presumably keeps matrices of the form U + I positive definite — used by TEEM.
inline mat shrink_cov(const mat & V, const double & eps)
{
	vec eigval;
	mat eigvec;

	eig_sym(eigval, eigvec, V);
	for (uword i = 0; i < eigval.n_elem; ++i) {
		eigval(i) = (eigval(i) > 1.0) ? eigval(i) : (1.0 + eps);
	}
	return eigvec * diagmat(eigval) * trans(eigvec);
}

// TEEM CLASS
// ----------
// @title Truncated Eigenvalue Extreme deconvolution
// @description ...
// @param X n-by-d data matrix (rows are samples)
// @param w initial mixture weights (length k)
// @param U initial prior covariance matrices (d x d x k cube)
// @param maxiter maximum number of EM iterations
// @param tol convergence tolerance on the mixture weights
// @param verbose currently unused by fit()
class TEEM {
public:
    // Ctor: stores the data and weights and works on T = U + I internally,
    // so that shrink_cov's eigenvalue floor at 1 corresponds to U being PSD.
    TEEM(const mat & X_mat, const vec & w_vec, const cube & U_cube) :
        X_mat(X_mat), w_vec(w_vec)
    {
        T_cube = U_cube;
        for (unsigned j = 0; j < T_cube.n_slices; ++j) {
            T_cube.slice(j) += eye(size(T_cube.slice(j)));
        }
    }

    ~TEEM(){ }

    // Per-iteration log-likelihood trace (resized to the iterations actually run).
    vec get_objective() const { return objective; }

    // Per-iteration max absolute change in mixture weights.
    vec get_maxd() const { return maxd; }

    // Current mixture weights.
    vec get_w() const { return w_vec; }

    // Recover U from the internal T = U + I parameterization.
    cube get_U() const
    {
        cube U_cube = T_cube;
        for (unsigned j = 0; j < U_cube.n_slices; ++j) {
            U_cube.slice(j) -= eye(size(U_cube.slice(j)));
        }
        return U_cube;
    }

    // EM loop: alternates posterior responsibilities (E-step) and
    // covariance/weight updates (M-step) until the weights stop moving.
    // Always returns 0.
    int fit(const int & maxiter, const double & converge_tol, const double & eigen_tol,
            const bool & verbose)
    {
        // initialize to store progress
        objective.zeros(maxiter);
        maxd.zeros(maxiter);
        int iter_out = 0;

        // Get the number of samples (n) and the number of mixture components (k)
        unsigned int n = X_mat.n_rows;
        unsigned int k = w_vec.size();

        for (unsigned int iter = 0; iter < (unsigned int) maxiter; ++iter) {
            // store parameters and likelihood in the previous step
            vec w0_vec = w_vec;

            // E-step: calculate posterior probabilities using the current mu and sigmas
            mat logP = zeros<mat>(n, k); // n by k matrix
            for (unsigned j = 0; j < k; ++j) {
                logP.col(j) = log(w_vec(j)) +
                    dmvnorm_mat(trans(X_mat), zeros<vec>(X_mat.n_cols), T_cube.slice(j), true); // ??
            }
            // softmax for renormalization
            mat P_mat = zeros<mat>(k, n); // k by n matrix. because of row/col vec converting
            for (uword i = 0; i < n; ++i) {
                colvec y = arma::conv_to<colvec>::from(logP.row(i));
                P_mat.col(i) = softmax(y);
            }
            P_mat = trans(P_mat); // n by k matrix

            // M-step: responsibility-weighted scatter, then eigenvalue shrinkage
            // to keep each T PD (and hence U = T - I PSD up to eigen_tol).
            for (unsigned int j = 0; j < k; ++j) {
                T_cube.slice(j) = trans(X_mat) * (P_mat.col(j) % X_mat.each_col()) /
                    accu(P_mat.col(j));
                T_cube.slice(j) = shrink_cov(T_cube.slice(j), eigen_tol);
            }
            // update mixture weights
            w_vec = arma::conv_to<colvec>::from(sum(P_mat, 0)) / n; // 0:sum by column;

            // Compute log-likelihood at the current estimates
            double f = compute_loglik();

            // Check stopping criterion: max absolute change in weights
            double d = max(abs(w_vec - w0_vec));
            maxd(iter) = d;
            objective(iter) = f;
            iter_out = iter;
            if (d < converge_tol) {
                break;
            }
        }
        // trim the traces to the iterations actually performed
        objective.resize(iter_out + 1);
        maxd.resize(iter_out + 1);
        return 0;
    } // fit

private:
    mat X_mat;
    vec w_vec;
    cube T_cube;  // internal parameterization: T = U + I
    vec objective;
    vec maxd;

    // Observed-data log-likelihood under the current mixture (densities, not logs,
    // are mixed; log is taken of the per-sample mixture sum).
    double compute_loglik()
    {
        unsigned int n = X_mat.n_rows;
        unsigned int k = w_vec.size();
        vec y = zeros<vec>(n);
        for (unsigned int j = 0; j < k; ++j) {
            y = y + w_vec(j) * dmvnorm_mat(trans(X_mat), zeros<vec>(X_mat.n_cols),
                                           T_cube.slice(j));
        }
        return (sum(log(y)));
    }
};

// FUNCTION DEFINITIONS
// --------------------

// @title calc_lik
// @description computes matrix of likelihoods for each of J cols of Bhat for each of P prior covariances
// @param b_mat R by J
// @param s_mat R by J
// @param v_mat R by R
// @param l_mat R by R for the common baseline application (@Yuxin Zou)
// @param U_cube list of prior covariance matrices
// @param sigma_cube list of sigma which is result of get_cov(s_mat, v_mat, l_mat)
// @param logd if true computes log-likelihood
// @param common_cov if true use version for common covariance
// @param n_thread number of OpenMP threads to use (ignored without OpenMP)
// @return J x P matrix of multivariate normal likelihoods, p(bhat | U[p], V)
mat calc_lik(const mat & b_mat,
             const mat & s_mat,
             const mat & v_mat,
             const mat & l_mat,
             const cube & U_cube,
             const cube & sigma_cube,
             bool logd,
             bool common_cov,
             int n_thread = 1)
{
    // In armadillo data are stored with column-major ordering
    // slicing columns are therefore faster than rows
    // lik is a J by P matrix
    mat lik(b_mat.n_cols, U_cube.n_slices, arma::fill::zeros);
    vec mean(b_mat.n_rows, arma::fill::zeros);
    mat sigma;
#ifdef _OPENMP
    omp_set_num_threads(n_thread);
#endif
    if (common_cov) {
        // one error covariance shared by all J effects: compute it once
        if (!sigma_cube.is_empty()) sigma = sigma_cube.slice(0);
        else sigma = get_cov(s_mat.col(0), v_mat, l_mat);
#pragma omp parallel for default(none) schedule(static) shared(lik, U_cube, mean, sigma, logd, b_mat)
        for (uword p = 0; p < lik.n_cols; ++p) {
            lik.col(p) = dmvnorm_mat(b_mat, mean, sigma + U_cube.slice(p), logd);
        }
    } else {
        // per-effect error covariance; sigma is private to each thread
#pragma omp parallel for default(none) schedule(static) shared(lik, mean, logd, U_cube, b_mat, sigma_cube, l_mat, v_mat, s_mat) private(sigma)
        for (uword j = 0; j < lik.n_rows; ++j) {
            if (!sigma_cube.is_empty()) sigma = sigma_cube.slice(j);
            else sigma = get_cov(s_mat.col(j), v_mat, l_mat);
            for (uword p = 0; p < lik.n_cols; ++p) {
                lik.at(j, p) = dmvnorm(b_mat.col(j), mean, sigma + U_cube.slice(p), logd);
            }
        }
    }
    return lik;
}

// @title calc_lik multivariate common cov version with sigma inverse precomputed
// @description computes matrix of likelihoods for each of J cols of Bhat for each of P prior covariances
// @param b_mat R by J
// @param rooti_cube R by R by P, or R by R by J by P (flattened over slices), if common_cov is False
// @param logd if true computes log-likelihood
// @param common_cov if true use version for common covariance
// @param n_thread number of OpenMP threads to use (ignored without OpenMP)
// @return J x P matrix of multivariate normal likelihoods, p(bhat | U[p], V)
mat calc_lik(const mat & b_mat,
             const cube & rooti_cube,
             bool logd,
             bool common_cov,
             int n_thread = 1)
{
#ifdef _OPENMP
    omp_set_num_threads(n_thread);
#endif
    // In armadillo data are stored with column-major ordering
    // slicing columns are therefore faster than rows
    // lik is a J by P matrix
    int P;
    if (common_cov) P = rooti_cube.n_slices;
    else P = rooti_cube.n_slices / b_mat.n_cols;  // slices are laid out j-major: j * P + p
    mat lik(b_mat.n_cols, P, arma::fill::zeros);
    vec mean(b_mat.n_rows, arma::fill::zeros);
    if (common_cov) {
#pragma omp parallel for default(none) schedule(static) shared(lik, mean, logd, rooti_cube, b_mat)
        for (uword p = 0; p < lik.n_cols; ++p) {
            lik.col(p) = dmvnorm_mat(b_mat, mean, rooti_cube.slice(p), logd, true);
        }
    } else {
#pragma omp parallel for default(none) schedule(static) shared(lik, mean, logd, rooti_cube, b_mat)
        for (uword j = 0; j < lik.n_rows; ++j) {
            for (uword p = 0; p < lik.n_cols; ++p) {
                // slice index j * P + p selects the (j, p) precomputed root-inverse
                lik.at(j, p) = dmvnorm(b_mat.col(j), mean,
                                       rooti_cube.slice(j * lik.n_cols + p), logd, true);
            }
        }
    }
    return lik;
}

// @title calc_lik univariate version
// @description computes matrix of likelihoods for each of J cols of Bhat for each of P prior sigma
// @param b_vec of J
// @param s_vec of J
// @param v numeric
// @param U_vec P vector
// @param logd if true computes log-likelihood
// @return J x P matrix of univariate normal likelihoods, p(bhat | U[p], V)
mat calc_lik(const vec & b_vec,
             const vec & s_vec,
             double v,
             const vec & U_vec,
             bool logd)
{
    mat lik(b_vec.n_elem, U_vec.n_elem, arma::fill::zeros);
    // error variance per effect: s^2 * v
    vec sigma = s_vec % s_vec * v;
    vec mean(b_vec.n_elem, arma::fill::zeros);
    for (uword p = 0; p < lik.n_cols; ++p) {
        lik.col(p) = dnorm(b_vec, mean, sigma + U_vec.at(p), logd);
    }
    return lik;
}

// This implements the core part of the compute_posterior method in
// the PosteriorMASH class.
// Core of PosteriorMASH::compute_posterior.
// For each effect j (column of b_mat) it mixes, over the P prior components,
// the per-component posterior mean / second moment / sign probabilities using
// posterior_weights (P X J), writing into the R X J output matrices and, when
// report_type is 2 or 4, the J-slice cube of full posterior covariances.
// Parallelized over j; each iteration touches only column/slice j, so no
// synchronization is needed. Always returns 0.
int mash_compute_posterior(const mat& b_mat, const SE& s_obj, const mat& v_mat,
                           const mat& l_mat, const mat& a_mat,
                           const cube& U_cube, const cube& Vinv_cube, const cube& U0_cube,
                           mat& post_mean, mat& post_var, mat& neg_prob, mat& zero_prob,
                           cube& post_cov,
                           const mat& posterior_weights, const int& report_type)
{
    vec mean(post_mean.n_rows);
    mean.fill(0);

#pragma omp parallel for schedule(static) default(none) shared(posterior_weights, report_type, mean, post_mean, post_var, neg_prob, zero_prob, post_cov, b_mat, s_obj, l_mat, v_mat, a_mat, U_cube, Vinv_cube, U0_cube)
    for (uword j = 0; j < post_mean.n_cols; ++j) {
        // FIXME: improved math may help here
        // V^{-1} for effect j: use the precomputed slice when available.
        mat Vinv_j;
        if (Vinv_cube.is_empty())
            Vinv_j = inv_sympd(get_cov(s_obj.get_original().col(j), v_mat, l_mat));
        else Vinv_j = Vinv_cube.slice(j);
        // R X P matrices holding per-component posterior summaries for effect j
        mat mu1_mat(post_mean.n_rows, U_cube.n_slices);
        mat diag_mu2_mat(post_mean.n_rows, U_cube.n_slices);
        mat zero_mat(post_mean.n_rows, U_cube.n_slices);
        mat neg_mat(post_mean.n_rows, U_cube.n_slices);
        mu1_mat.fill(0);
        diag_mu2_mat.fill(0);
        zero_mat.fill(0);
        for (uword p = 0; p < U_cube.n_slices; ++p) {
            //
            mat U1(post_mean.n_rows, post_mean.n_rows);
            mat U0;
            U1.fill(0);
            // posterior covariance (on the standardized scale) for component p
            if (U0_cube.is_empty()) U0 = get_posterior_cov(Vinv_j, U_cube.slice(p));
            else U0 = U0_cube.slice(j * U_cube.n_slices + p);
            if (a_mat.is_empty()) {
                // rescale back to the original scale via the standard errors
                mu1_mat.col(p) = get_posterior_mean(b_mat.col(j), Vinv_j, U0) %
                    s_obj.get().col(j);
                U1 = (U0.each_col() % s_obj.get().col(j)).each_row() %
                    s_obj.get().col(j).t();
            } else {
                // additionally project through the linear transform a_mat
                mu1_mat.col(p) = a_mat *
                    (get_posterior_mean(b_mat.col(j), Vinv_j, U0) % s_obj.get().col(j));
                U1 = a_mat * (((U0.each_col() % s_obj.get().col(j)).each_row() %
                               s_obj.get().col(j).t()) * a_mat.t());
            }
            if (report_type == 2 || report_type == 4) {
                // accumulate the weighted second moment into the covariance slice
                post_cov.slice(j) += posterior_weights.at(p, j) *
                    (U1 + mu1_mat.col(p) * mu1_mat.col(p).t());
            }
            vec sigma = sqrt(U1.diag()); // marginal posterior SDs (diag of U1)
            diag_mu2_mat.col(p) = pow(mu1_mat.col(p), 2.0) + U1.diag();
            neg_mat.col(p) = pnorm(mu1_mat.col(p), mean, sigma);
            for (uword r = 0; r < sigma.n_elem; ++r) {
                // degenerate component: point mass at zero
                if (sigma.at(r) == 0) {
                    zero_mat.at(r, p) = 1.0;
                    neg_mat.at(r, p) = 0.0;
                }
            }
        }
        // compute weighted means of posterior arrays
        post_mean.col(j) = mu1_mat * posterior_weights.col(j);
        post_var.col(j) = diag_mu2_mat * posterior_weights.col(j);
        neg_prob.col(j) = neg_mat * posterior_weights.col(j);
        zero_prob.col(j) = zero_mat * posterior_weights.col(j);
        // if (report_type == 4)
        // turn the accumulated second moment into a covariance: E[bb'] - E[b]E[b]'
        post_cov.slice(j) -= post_mean.col(j) * post_mean.col(j).t();
    }
    // second moment minus squared mean gives the marginal variances
    post_var -= pow(post_mean, 2.0);
    return 0;
} // mash_compute_posterior

// This implements the core part of the compute_posterior_comcov method in
// the PosteriorMASH class.
// Common-covariance variant: V^{-1} (and hence each component's posterior
// covariance U1) is shared by all J effects, so the parallel loop runs over
// components p instead; the R X J accumulations into the shared outputs are
// serialized with an OpenMP critical section. Always returns 0.
int mash_compute_posterior_comcov(const mat& b_mat, const SE & s_obj,
                                  const mat & v_mat, const mat & l_mat, const mat & a_mat,
                                  const cube & U_cube, const cube & Vinv_cube, const cube & U0_cube,
                                  mat & post_mean, mat & post_var, mat & neg_prob, mat & zero_prob,
                                  cube & post_cov,
                                  const mat & posterior_weights, const int & report_type)
{
    mat mean(post_mean.n_rows, post_mean.n_cols);
    mean.fill(0);
    // R X R: single shared inverse error covariance
    mat Vinv;
    if (Vinv_cube.is_empty())
        Vinv = inv_sympd(get_cov(s_obj.get_original().col(0), v_mat, l_mat));
    else Vinv = Vinv_cube.slice(0);
    // constant rows used to overwrite degenerate (zero-SD) coordinates below
    rowvec ones(post_mean.n_cols);
    rowvec zeros(post_mean.n_cols);
    ones.fill(1);
    zeros.fill(0);

#pragma omp parallel for schedule(static) default(none) shared(posterior_weights, report_type, mean, Vinv, ones, zeros, post_mean, post_var, neg_prob, zero_prob, post_cov, b_mat, s_obj, a_mat, U_cube, U0_cube)
    for (uword p = 0; p < U_cube.n_slices; ++p) {
        mat zero_mat(post_mean.n_rows, post_mean.n_cols);
        // R X R
        mat U1(post_mean.n_rows, post_mean.n_rows);
        // R X J
        mat mu1_mat(post_mean.n_rows, post_mean.n_cols);
        mat U0;
        zero_mat.fill(0);
        U1.fill(0);
        mu1_mat.fill(0);
        if (U0_cube.is_empty()) U0 = get_posterior_cov(Vinv, U_cube.slice(p));
        else U0 = U0_cube.slice(p);
        if (a_mat.is_empty()) {
            // rescale by standard errors; col(0) suffices since SEs are common
            mu1_mat = get_posterior_mean_mat(b_mat, Vinv, U0) % s_obj.get();
            U1 = (U0.each_col() % s_obj.get().col(0)).each_row() %
                s_obj.get().col(0).t();
        } else {
            mu1_mat = a_mat * (get_posterior_mean_mat(b_mat, Vinv, U0) % s_obj.get());
            U1 = a_mat * (((U0.each_col() % s_obj.get().col(0)).each_row() %
                           s_obj.get().col(0).t()) * a_mat.t());
        }
        // R X J: second moments, diag(U1) broadcast over columns
        mat diag_mu2_mat = pow(mu1_mat, 2.0);
        diag_mu2_mat.each_col() += U1.diag();
        // R X J
        // FIXME: any better way to init sigma?
        mat sigma(post_mean.n_rows, post_mean.n_cols);
        sigma.fill(0);
        sigma.each_col() += sqrt(U1.diag()); // marginal posterior SDs (diag of U1)
        mat neg_mat = pnorm(mu1_mat, mean, sigma);
        for (uword r = 0; r < sigma.n_rows; ++r) {
            // SD is common across columns, so checking column 0 covers the row
            if (sigma.at(r, 0) == 0) {
                zero_mat.row(r) = ones;
                neg_mat.row(r) = zeros;
            }
        }
        // compute weighted means of posterior arrays
#pragma omp critical
        {
            post_mean += mu1_mat.each_row() % posterior_weights.row(p);
            post_var += diag_mu2_mat.each_row() % posterior_weights.row(p);
            neg_prob += neg_mat.each_row() % posterior_weights.row(p);
            zero_prob += zero_mat.each_row() % posterior_weights.row(p);
            if (report_type == 2 || report_type == 4) {
                for (uword j = 0; j < post_mean.n_cols; ++j) {
                    post_cov.slice(j) += posterior_weights.at(p, j) *
                        (U1 + mu1_mat.col(j) * mu1_mat.col(j).t());
                }
            }
        }
    }
    // second moment minus squared mean gives the marginal variances
    post_var -= pow(post_mean, 2.0);
    // if (report_type == 4)
    {
#pragma omp parallel for schedule(static) default(none) shared(post_cov, post_mean)
        for (uword j = 0; j < post_mean.n_cols; ++j) {
            // E[bb'] - E[b]E[b]' per effect
            post_cov.slice(j) -= post_mean.col(j) * post_mean.col(j).t();
        }
    }
    return 0;
} // mash_compute_posterior_comcov

// This implements the core part of the compute_posterior method in
// the MVSERMix class.
// Core of MVSERMix::compute_posterior.
// Like mash_compute_posterior but without the l_mat/a_mat transforms and with
// an optional EM update of the per-component prior scalar: when
// posterior_variable_weights is non-empty, the weighted second moments are
// accumulated into Eb2_cube and folded into prior_scalar at the end.
// Parallelized over effects j; the Eb2_cube accumulation is the only write to
// shared state indexed by p, hence the critical section. Always returns 0.
int mvsermix_compute_posterior(const mat& b_mat, const mat & s_mat, mat & v_mat,
                               cube & U_cube, cube & Vinv_cube, cube & U0_cube, cube & Uinv_cube,
                               mat & post_mean, mat & post_var, mat & neg_prob, mat & zero_prob,
                               cube & post_cov, vec & prior_scalar,
                               const mat & posterior_weights,
                               const mat & posterior_variable_weights)
{
    vec mean(post_mean.n_rows);
    mean.fill(0);
    // This is meant to store a length P of 2nd moment matrices,
    // each element is \sum_j posterior_{p,j} * mu2_{p,j}
    cube Eb2_cube;
    bool to_estimate_prior = !posterior_variable_weights.is_empty();
    if (to_estimate_prior) {
        // we will compute the EM update for prior scalar here
        // for use with mmbr package
        Eb2_cube.set_size(post_mean.n_rows, post_mean.n_rows, U_cube.n_slices);
        Eb2_cube.zeros();
    }

#pragma omp parallel for schedule(static) default(none) shared(posterior_weights, posterior_variable_weights, to_estimate_prior, mean, Eb2_cube, post_mean, post_var, neg_prob, zero_prob, post_cov, prior_scalar, b_mat, s_mat, v_mat, U_cube, Vinv_cube, U0_cube, Uinv_cube)
    for (uword j = 0; j < post_mean.n_cols; ++j) {
        // FIXME: improved math may help here
        // V^{-1} for effect j: use the precomputed slice when available.
        mat Vinv_j;
        if (Vinv_cube.is_empty()) Vinv_j = inv_sympd(get_cov(s_mat.col(j), v_mat));
        else Vinv_j = Vinv_cube.slice(j);
        // R X P matrices of per-component posterior summaries for effect j
        mat mu1_mat(post_mean.n_rows, U_cube.n_slices);
        mat diag_mu2_mat(post_mean.n_rows, U_cube.n_slices);
        mat zero_mat(post_mean.n_rows, U_cube.n_slices);
        mat neg_mat(post_mean.n_rows, U_cube.n_slices);
        mu1_mat.fill(0);
        diag_mu2_mat.fill(0);
        zero_mat.fill(0);
        neg_mat.fill(0);
        // R X R X P cube of per-component posterior second moments
        cube mu2_cube;
        mu2_cube.set_size(post_mean.n_rows, post_mean.n_rows, U_cube.n_slices);
        for (uword p = 0; p < U_cube.n_slices; ++p) {
            // posterior covariance for component p (precomputed when available)
            mat U1;
            if (U0_cube.is_empty()) U1 = get_posterior_cov(Vinv_j, U_cube.slice(p));
            else U1 = U0_cube.slice(j * U_cube.n_slices + p);
            mu1_mat.col(p) = get_posterior_mean(b_mat.col(j), Vinv_j, U1);
            // this is posterior 2nd moment for the j-th variable and the p-th prior
            mu2_cube.slice(p) = U1 + mu1_mat.col(p) * mu1_mat.col(p).t();
            // add to posterior 2nd moment contribution of the p-th component
            post_cov.slice(j) += posterior_weights.at(p, j) * mu2_cube.slice(p);
            vec sigma = sqrt(U1.diag()); // marginal posterior SDs (diag of U1)
            diag_mu2_mat.col(p) = pow(mu1_mat.col(p), 2.0) + U1.diag();
            neg_mat.col(p) = pnorm(mu1_mat.col(p), mean, sigma);
            for (uword r = 0; r < sigma.n_elem; ++r) {
                // degenerate component: point mass at zero
                if (sigma.at(r) == 0) {
                    zero_mat.at(r, p) = 1.0;
                    neg_mat.at(r, p) = 0.0;
                }
            }
        }
        // compute weighted means of posterior arrays
        post_mean.col(j) = mu1_mat * posterior_weights.col(j);
        post_var.col(j) = diag_mu2_mat * posterior_weights.col(j);
        neg_prob.col(j) = neg_mat * posterior_weights.col(j);
        zero_prob.col(j) = zero_mat * posterior_weights.col(j);
        // E[bb'] - E[b]E[b]' per effect
        post_cov.slice(j) -= post_mean.col(j) * post_mean.col(j).t();
        if (to_estimate_prior) {
            // Eb2_cube slices are shared across the j-loop, so guard the updates.
#pragma omp critical
            {
                for (uword p = 0; p < U_cube.n_slices; ++p) {
                    // we will compute some quantity to provide for
                    // EM update for prior scalar in mmbr package
                    // the M-step update is:
                    // \sigma_0^2 = \sum_{p=1}^P p(\gamma_p) \mathrm{tr}(U_p^{-1} E[bb^T \,|\, \gamma_p])/r
                    // where E[bb^T \,|\, \gamma_p] = \sum_j \alpha_{p,j} * mu2_mat_{p,j}
                    Eb2_cube.slice(p) += posterior_variable_weights.at(p, j) *
                        mu2_cube.slice(p);
                }
            }
        }
    }
    // second moment minus squared mean gives the marginal variances
    post_var -= pow(post_mean, 2.0);
    if (to_estimate_prior) {
        // now compute \mathrm{tr}(U_p^{-1} E[bb^T \,|\, \gamma_p])/r for each p
        // NOTE(review): the comment above mentions dividing by r, but no division
        // appears here — presumably applied by the caller; verify against mmbr.
        for (uword p = 0; p < U_cube.n_slices; ++p) {
            prior_scalar.at(p) = trace(Uinv_cube.slice(p) * Eb2_cube.slice(p));
        }
    }
    return 0;
} // mvsermix_compute_posterior

// This implements the core part of the compute_posterior_comcov method in
// the MVSERMix class.
// Common-covariance core of MVSERMix::compute_posterior_comcov.
// V^{-1} (and each component's posterior covariance U1) is shared by all J
// effects, so the parallel loop runs over components p. Each p owns its own
// Eb2_cube slice and prior_scalar entry, so those need no locking; only the
// R X J output accumulations are serialized via the critical section.
// Always returns 0.
int mvsermix_compute_posterior_comcov(const mat& b_mat, const mat & s_mat, const mat & v_mat,
                                      const cube & U_cube, const cube & Vinv_cube,
                                      const cube & U0_cube, const cube & Uinv_cube,
                                      mat & post_mean, mat & post_var, mat & neg_prob,
                                      mat & zero_prob, cube & post_cov, vec & prior_scalar,
                                      const mat & posterior_weights,
                                      const mat & posterior_variable_weights)
{
    mat mean(post_mean.n_rows, post_mean.n_cols);
    mean.fill(0);
    // for Eb2_cube see compute_posterior() for detailed documentations.
    cube Eb2_cube;
    bool to_estimate_prior = !posterior_variable_weights.is_empty();
    if (to_estimate_prior) {
        Eb2_cube.set_size(post_mean.n_rows, post_mean.n_rows, U_cube.n_slices);
        Eb2_cube.zeros();
    }
    // R X R: single shared inverse error covariance
    mat Vinv;
    if (Vinv_cube.is_empty()) Vinv = inv_sympd(get_cov(s_mat.col(0), v_mat));
    else Vinv = Vinv_cube.slice(0);
    // constant rows used to overwrite degenerate (zero-SD) coordinates below
    rowvec ones(post_mean.n_cols);
    rowvec zeros(post_mean.n_cols);
    ones.fill(1);
    zeros.fill(0);

#pragma omp parallel for schedule(static) default(none) shared(posterior_weights, posterior_variable_weights, to_estimate_prior, mean, Vinv, zeros, ones, Eb2_cube, post_mean, post_var, neg_prob, zero_prob, post_cov, prior_scalar, b_mat, U_cube, U0_cube, Uinv_cube)
    for (uword p = 0; p < U_cube.n_slices; ++p) {
        mat zero_mat(post_mean.n_rows, post_mean.n_cols);
        // R X R
        mat U1;
        // R X J
        mat mu1_mat;
        zero_mat.fill(0);
        if (U0_cube.is_empty()) U1 = get_posterior_cov(Vinv, U_cube.slice(p));
        else U1 = U0_cube.slice(p);
        mu1_mat = get_posterior_mean_mat(b_mat, Vinv, U1);
        // per-effect posterior second moments for this component
        cube mu2_cube;
        mu2_cube.set_size(post_mean.n_rows, post_mean.n_rows, post_mean.n_cols);
        for (uword j = 0; j < post_mean.n_cols; ++j) {
            mu2_cube.slice(j) = U1 + mu1_mat.col(j) * mu1_mat.col(j).t();
            // slice(p) is written only by this loop iteration p — no race
            if (to_estimate_prior) Eb2_cube.slice(p) +=
                posterior_variable_weights.at(p, j) * mu2_cube.slice(j);
        }
        if (to_estimate_prior) prior_scalar.at(p) =
            trace(Uinv_cube.slice(p) * Eb2_cube.slice(p));
        // R X J: second moments, diag(U1) broadcast over columns
        mat diag_mu2_mat = pow(mu1_mat, 2.0);
        diag_mu2_mat.each_col() += U1.diag();
        // R X J
        // FIXME: any better way to init sigma?
        mat sigma(post_mean.n_rows, post_mean.n_cols);
        sigma.fill(0);
        sigma.each_col() += sqrt(U1.diag()); // marginal posterior SDs (diag of U1)
        mat neg_mat = pnorm(mu1_mat, mean, sigma);
        for (uword r = 0; r < sigma.n_rows; ++r) {
            // SD is common across columns, so checking column 0 covers the row
            if (sigma.at(r, 0) == 0) {
                zero_mat.row(r) = ones;
                neg_mat.row(r) = zeros;
            }
        }
#pragma omp critical
        {
            // compute weighted means of posterior arrays
            post_mean += mu1_mat.each_row() % posterior_weights.row(p);
            post_var += diag_mu2_mat.each_row() % posterior_weights.row(p);
            neg_prob += neg_mat.each_row() % posterior_weights.row(p);
            zero_prob += zero_mat.each_row() % posterior_weights.row(p);
            for (uword j = 0; j < post_mean.n_cols; ++j) {
                post_cov.slice(j) += posterior_weights.at(p, j) * mu2_cube.slice(j);
            }
        }
    }
    // second moment minus squared mean gives the marginal variances
    post_var -= pow(post_mean, 2.0);

#pragma omp parallel for schedule(static) default(none) shared(post_cov, post_mean)
    for (uword j = 0; j < post_mean.n_cols; ++j) {
        // E[bb'] - E[b]E[b]' per effect
        post_cov.slice(j) -= post_mean.col(j) * post_mean.col(j).t();
    }
    return 0;
} // mvsermix_compute_posterior_comcov

#endif // ifndef _MASH_H
radmin_fmt_plug.c
/* RAdmin v2.x cracker patch for JtR. Hacked together during
 * May of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
 *
 * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted.
 *
 * Input Format => user:$radmin2$hash
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_radmin;
#elif FMT_REGISTERS_H
john_register_one(&fmt_radmin);
#else

#include "md5.h"
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
#include <omp.h>
// Tuned on core i7 quad HT
//   1   7445K
//  16  12155K
//  32  12470K  ** this was chosen.
//  64  12608k
// 128  12508k
#ifndef OMP_SCALE
#define OMP_SCALE               32
#endif
#endif
#include "memdbg.h"

#define FORMAT_LABEL            "RAdmin"
#define FORMAT_NAME             "v2.x"
#define FORMAT_TAG              "$radmin2$"
#define FORMAT_TAG_LEN          (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME          "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
/* RAdmin v2 hashes the password as a fixed 100-byte zero-padded buffer,
 * hence the 99-character plaintext limit below. */
#define PLAINTEXT_LENGTH        99
#define CIPHERTEXT_LENGTH       32
#define BINARY_SIZE             16
#define SALT_SIZE               0
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      64
#define BINARY_ALIGN            4
#define SALT_ALIGN              1

static struct fmt_tests radmin_tests[] = {
	{"$radmin2$B137F09CF92F465CABCA06AB1B283C1F", "lastwolf"},
	{"$radmin2$14e897b1a9354f875df51047bb1a0765", "podebradka"},
	{"$radmin2$02ba5e187e2589be6f80da0046aa7e3c", "12345678"},
	{"$radmin2$b4e13c7149ebde51e510959f30319ac7", "firebaLL"},
	{"$radmin2$3d2c8cae4621edf8abb081408569482b", "yamaha12345"},
	{"$radmin2$60cb8e411b02c10ecc3c98e29e830de8", "xplicit"},
	{"$radmin2$53b1dc4fd27e58a075b196f99b2ac992", "UPPERCASE"},
	{"$radmin2$6d0bb00954ceb7fbee436bb55a8397a9", ""},
	{NULL}
};

/* Per-candidate key buffers (100 bytes each, zero-padded) and MD5 results. */
static char (*saved_key)[PLAINTEXT_LENGTH+1];
static ARCH_WORD_32 (*crypt_out)[8];

/* Allocate key/result arrays, scaled for OpenMP when enabled. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out));
}

static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_out);
}

/* Canonicalize a ciphertext: bound-copy and lower-case the hex digest. */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char buf[CIPHERTEXT_LENGTH + FORMAT_TAG_LEN + 1];	// $radmin2$ is 9 bytes

	strnzcpy(buf, ciphertext, CIPHERTEXT_LENGTH + FORMAT_TAG_LEN + 1);
	strlwr(buf);
	return buf;
}

/* Accept only "$radmin2$" followed by exactly 32 hex digits. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;
	int extra;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	p = ciphertext + FORMAT_TAG_LEN;
	if (hexlen(p, &extra) != CIPHERTEXT_LENGTH || extra)
		return 0;
	return 1;
}

/* Decode the 32 hex digits after the last '$' into 16 raw bytes. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;	/* forces alignment of c */
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

/* Partial-hash lookups: successive low-bit masks of the first output word. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		MD5_CTX ctx;

		MD5_Init(&ctx);
		/* RAdmin v2 hashes the whole 100-byte zero-padded buffer,
		 * not just strlen(key) bytes — sizeof is intentional here. */
		MD5_Update(&ctx, saved_key[index], sizeof(saved_key[index]));
		MD5_Final((unsigned char *)crypt_out[index], &ctx);
	}
	return count;
}

/* Quick reject: compare only the first 32 bits against all candidates. */
static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (*(ARCH_WORD_32 *)binary == crypt_out[index][0])
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return *(ARCH_WORD_32 *)binary == crypt_out[index][0];
}

/* Full 16-byte comparison for a single candidate. */
static int cmp_exact(char *source, int index)
{
	void *binary = get_binary(source);

	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static void radmin_set_key(char *key, int index)
{
	// this code assures that both saved_key[index] gets null-terminated (without buffer overflow)
	char *cp = &saved_key[index][strnzcpyn(saved_key[index], key, PLAINTEXT_LENGTH + 1)+1];

	// and is null padded up to 100 bytes. We simply clean up prior buffer, up to element 99, but that element will never be written to
	if (cp < &saved_key[index][99])
		while (*cp)
			*cp++ = 0;
}

static char *get_key(int index)
{
	// assured null teminated string. Just return it.
	return saved_key[index];
}

struct fmt_main fmt_radmin = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		{ FORMAT_TAG },
		radmin_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		radmin_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
samples.h
/*************************************************************************************************** * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are not permit- * ted. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 **************************************************************************************************/
#pragma once

#include <assert.h>
#include <float.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <cuda_runtime_api.h>
#include <cuda_fp16.h>

#include <xmma/numeric_types.h>
#include <xmma/params.h>

////////////////////////////////////////////////////////////////////////////////////////////////////

// Abort the process with a diagnostic if a CUDA runtime call fails.
#define XMMA_CHECK_CUDA(call) do { \
    cudaError status_ = call; \
    if( status_ != cudaSuccess ) { \
        fprintf(stderr, "Cuda error in file \"%s\" at line %d: %s\n", \
            __FILE__, \
            __LINE__, \
            cudaGetErrorString(status_)); \
        exit(1); \
    } \
} while(0)

////////////////////////////////////////////////////////////////////////////////////////////////////
//
// A R C H / D A T A T Y P E S
//
////////////////////////////////////////////////////////////////////////////////////////////////////

// Bit flags so architectures can be OR-ed together into a mask.
enum Arch { ARCH_VOLTA = 0x1, ARCH_TURING = 0x2, ARCH_AMPERE = 0x4, ARCH_HOPPER = 0x8 };

////////////////////////////////////////////////////////////////////////////////////////////////////

// Map an SM version number (70/72/75/80/82/90) to its architecture flag;
// asserts (and falls back to Volta in release builds) on anything else.
static inline Arch sm_to_arch(int sm) {
    if( sm == 70 || sm == 72 ) {
        return ARCH_VOLTA;
    } else if( sm == 75 ) {
        return ARCH_TURING;
    } else if( sm == 80 || sm == 82 ) {
        return ARCH_AMPERE;
    } else if( sm == 90 ) {
        return ARCH_HOPPER;
    } else {
        assert(false);
    }
    // unreachable with asserts enabled; keeps release builds well-defined
    return ARCH_VOLTA;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Element types supported by the kernels.
enum Data_type {
    DATA_TYPE_BOOL,
    DATA_TYPE_TF32,
    DATA_TYPE_BF16,
    DATA_TYPE_FP16,
    DATA_TYPE_FP32,
    DATA_TYPE_INT4,
    DATA_TYPE_INT8,
    DATA_TYPE_INT8x32,
    DATA_TYPE_INT32,
    DATA_TYPE_FP64
};

////////////////////////////////////////////////////////////////////////////////////////////////////

// Type configuration describing a GEMM instantiation.
struct Gemm_traits_desc {
    // The architecture.
    unsigned arch_;
    // The type of the elements of A.
    Data_type a_type_;
    // The type of the elements of B.
    Data_type b_type_;
    // The type of the elements of C.
    Data_type c_type_;
    // The type of the accumulators.
    Data_type acc_type_;
};

////////////////////////////////////////////////////////////////////////////////////////////////////

// Runtime problem description for a GEMM launch.
// NOTE: unlike Convolution_params, the ctor performs no zero-initialization.
struct Gemm_params {

    // Ctor.
    Gemm_params() {
    }

    // The dimensions of the matrices.
    int m_, n_, k_;
    // The dimensions of the leading dimensions.
    int lda_, ldb_, ldc_, ldd_;
    // The transposition.
    bool ta_, tb_;
    // The alpha and beta values.
    double alpha_[2], beta_[2];
    // Do we use horizontal CTA rasterization?
    bool use_horizontal_cta_rasterization_;
    // The number of split-k slices.
    int split_k_slices_;
    // The number of split-k buffers (between 1 and split-k-slices).
    int split_k_buffers_;
    // Are we doing the final reduction in a separate kernel?
    int split_k_kernels_;
};

////////////////////////////////////////////////////////////////////////////////////////////////////

// Type configuration describing a convolution instantiation.
struct Convolution_traits_desc {
    // The architecture.
    unsigned arch_;
    // The type of the elements of the activations.
    Data_type act_type_;
    // The type of the elements of the filters.
    Data_type flt_type_;
    // The type of the elements of the output.
    Data_type out_type_;
    // The type of the accumulators.
    Data_type acc_type_;
    // The type of the elements if bias
    Data_type bias_type_;
};

////////////////////////////////////////////////////////////////////////////////////////////////////

// Runtime problem description for a convolution launch (zero-initialized).
struct Convolution_params {

    // Ctor.
    Convolution_params() {
        memset(this, 0, sizeof(Convolution_params));
    }

    // Compute the output dimensions (standard conv output-size formula
    // for padded, strided, dilated convolutions).
    inline void compute_output_dimensions() {
        o_ = (d_ + 2*pad_d_ - (t_-1)*dilation_d_ - 1) / stride_d_ + 1;
        p_ = (h_ + 2*pad_h_ - (r_-1)*dilation_h_ - 1) / stride_h_ + 1;
        q_ = (w_ + 2*pad_w_ - (s_-1)*dilation_w_ - 1) / stride_w_ + 1;
    }

    // Data layouts
    xmma::Convolution_layout Layout_A, Layout_B, Layout_C;
    // The dimensions of the input activation tensor.
    int g_, n_, c_, d_, h_, w_;
    // The dimensions of the filter tensor.
    int k_, t_, r_, s_;
    // The dimensions of the output tensor.
    int o_, p_, q_;
    // The padding.
    int pad_d_, pad_h_, pad_w_;
    // The strides.
    int stride_d_, stride_h_, stride_w_;
    // The dilation.
    int dilation_d_, dilation_h_, dilation_w_;
    // The alpha and beta values.
    double alpha_[2], beta_[2];
    // Is it a cross correlation?
    bool is_cross_correlation_;
    // The activation.
    bool with_relu_;
    float relu_lb_, relu_ub_;
    // The bias.
    bool with_bias_;
    // Do we use horizontal CTA rasterization?
    bool use_horizontal_cta_rasterization_;
    // The number of split-k slices.
    int split_k_slices_;
    // The number of split-k buffers (between 1 and split-k-slices).
    int split_k_buffers_;
    // Are we doing the final reduction in a separate kernel?
    int split_k_kernels_;
    // The parameters to control how to split the TRSC dimension.
    int split_k_c_, split_k_t_, split_k_r_;
    int nhwc_pitch_c_;
};

////////////////////////////////////////////////////////////////////////////////////////////////////
//
// C H E C K S
//
////////////////////////////////////////////////////////////////////////////////////////////////////

#if defined SAMPLES
// Print a SUCCESS/FAILED/DISABLED verdict line, optionally ANSI-colored
// (green = pass, red = fail, yellow = checks disabled).
static inline void print_results(bool with_colors, bool enabled, bool success = false) {
    // The opening tag.
    char beg[16];
    if( with_colors && enabled && success ) {
        // Succeeded -> green
        strcpy(beg, "\033[0;32m");
    } else if( with_colors && enabled ) {
        // Failed -> red
        strcpy(beg, "\033[0;31m");
    } else if( with_colors ) {
        // Disabled -> yellow
        strcpy(beg, "\033[0;33m");
    }

    // The message.
    char msg[16];
    if( enabled && success ) {
        strcpy(msg, "SUCCESS");
    } else if( enabled ) {
        strcpy(msg, "FAILED");
    } else {
        strcpy(msg, "DISABLED");
    }

    // The closing tag.
    char end[16];
    if( with_colors ) {
        strcpy(end, "\033[0m");
    }

    // Print the results.
    if( with_colors ) {
        printf("Checks........: %s%s%s\n", beg, msg, end);
    } else {
        printf("Checks........: %s\n", msg);
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Compare an m x n output (leading dimension ld) against a reference using a
// relative error with absolute fallback below epsilon.
// NOTE(review): definition continues beyond this chunk of the file.
static inline int check_results(const float *out,
                                const float *ref,
                                size_t m,
                                size_t n,
                                size_t ld,
                                float epsilon,
                                bool verbose,
                                bool with_colors) {
    int failed = 0, infs = 0;
    float min_val = +FLT_MAX, max_val = -FLT_MAX, min_err = +FLT_MAX, max_err = -FLT_MAX;
    double avg_val = 0.0, sqr_val = 0.0, avg_err = 0.0, sqr_err = 0.0;
    double inv_mn = 1.0 / (double) m / (double) n;
    for( size_t ni = 0; ni < n; ++ni ) {
        for( size_t mi = 0; mi < m; ++mi ) {
            // The offset.
            size_t ii = (size_t) ni * ld + mi;

            // The elements.
            float a = out[ii];
            float b = ref[ii];

            // Compute the error: absolute when the magnitudes are tiny, else relative.
            float den = fabsf(a) + fabsf(b);
            float err = den <= epsilon ? fabsf(a-b) : fabsf(a-b) / den;

            // Min/max values.
            min_val = fminf(a, min_val);
            max_val = fmaxf(a, max_val);
            min_err = fminf(err, min_err);
            max_err = fmaxf(err, max_err);

            // Sums to compute the average value.
            avg_val += (double) a * inv_mn;
            sqr_val += (double) a * a * inv_mn;
            avg_err += (double) err * inv_mn;
            sqr_err += (double) err * err * inv_mn;

            // Does it fail?
if( isnan(a) || isnan(b) || err > epsilon ) { if( failed < 8 ) { printf("\tInvalid result for ni=%lu mi=%lu ii=%lu:\n", ni, mi, ii); printf("\t Found...: 0x%08x (%10.6f)\n", *(const int*) &out[ii], a); printf("\t Expected: 0x%08x (%10.6f)\n", *(const int*) &ref[ii], b); printf("\t Error...: %10.6f\n", err); } failed++; } infs += !isfinite(a); infs += !isfinite(b); } } double std_val = sqrtf(sqr_val - avg_val * avg_val); double std_err = sqrtf(sqr_err - avg_err * avg_err); if( verbose ) { printf("Epsilon.......: %.8f\n", epsilon); printf("Tested........: %lu\n", m*n); printf("Failed........: %d\n", failed); printf("Values........: Min=%12.6f, Max=%12.6f, Avg=%10.6lf, Std=%10.6lf\n", min_val, max_val, avg_val, std_val); printf("Error.........: Min=%12.6f, Max=%12.6f, Avg=%10.6lf, Std=%10.6lf\n", min_err, max_err, avg_err, std_err); printf("Epsilon.......: %.6f\n", epsilon); printf("Infs..........: %d\n", infs); print_results(with_colors, true, !failed); } return failed ? 1 : 0; } //////////////////////////////////////////////////////////////////////////////////////////////////// // // C O N V E R S I O N F R O M F L O A T // //////////////////////////////////////////////////////////////////////////////////////////////////// static inline void convert_bf16_from_float_(cutlass::float_bf16_t *dst, const float *src, size_t n) { for( size_t ii = 0; ii < n; ++ii ) { // Decompose the float into 2 uint16. union { uint16_t u16[2]; float f32; } tmp; tmp.f32 = src[ii]; // Decompose x into lo/hi parts. uint16_t lo = tmp.u16[0]; uint16_t hi = tmp.u16[1]; // Tweak the hi part if needed. 
if( lo == 0x8000 ) { hi += hi & 0x1; } else if( lo > 0x8000 ) { hi++; } dst[ii] = hi; } } //////////////////////////////////////////////////////////////////////////////////////////////////// static inline void convert_tf32_from_float_(uint32_t *dst, const float *src, size_t n) { for( size_t ii = 0; ii < n; ++ii ) { uint32_t x = reinterpret_cast<const uint32_t*>(src)[ii]; dst[ii] = x & 0xffffd000u; } } //////////////////////////////////////////////////////////////////////////////////////////////////// static inline void convert_from_float_(cutlass::half_t *dst, const float *src, size_t n) { for( size_t ii = 0; ii < n; ++ii ) { reinterpret_cast<__half*>(dst)[ii] = __float2half_rn(src[ii]); } } //////////////////////////////////////////////////////////////////////////////////////////////////// static inline void convert_from_float_(int32_t *dst, const float *src, size_t n) { for( size_t ii = 0; ii < n; ++ii ) { dst[ii] = (int32_t) src[ii]; } } //////////////////////////////////////////////////////////////////////////////////////////////////// static inline void convert_from_float_(int8_t *dst, const float *src, size_t n) { for( size_t ii = 0; ii < n; ++ii ) { float x = src[ii]; dst[ii] = (int8_t) (int32_t) (x < -128.f ? -128.f : (x > 127.f ? 
127.f : x)); } } //////////////////////////////////////////////////////////////////////////////////////////////////// static inline void convert_fp64_from_float_(double *dst, const float *src, size_t n) { for( size_t ii = 0; ii < n; ++ii ) { dst[ii] = src[ii]; } } //////////////////////////////////////////////////////////////////////////////////////////////////// static inline void convert_from_float(void *dst, const float *src, size_t n, Data_type dtype) { switch( dtype ) { case DATA_TYPE_BF16: convert_bf16_from_float_(reinterpret_cast<cutlass::float_bf16_t*>(dst), src, n); break; case DATA_TYPE_TF32: convert_tf32_from_float_(reinterpret_cast<uint32_t*>(dst), src, n); break; case DATA_TYPE_FP32: memcpy(dst, src, n*sizeof(float)); break; case DATA_TYPE_FP16: convert_from_float_(reinterpret_cast<cutlass::half_t*>(dst), src, n); break; case DATA_TYPE_INT32: convert_from_float_(reinterpret_cast<int32_t*>(dst), src, n); break; case DATA_TYPE_INT8: convert_from_float_(reinterpret_cast<int8_t*>(dst), src, n); break; case DATA_TYPE_INT8x32: convert_from_float_(reinterpret_cast<int8_t*>(dst), src, n); break; case DATA_TYPE_FP64: convert_fp64_from_float_(reinterpret_cast<double*>(dst), src, n); break; default: assert(false); // Not implemented! 
} } //////////////////////////////////////////////////////////////////////////////////////////////////// // // C O N V E R S I O N T O F L O A T // //////////////////////////////////////////////////////////////////////////////////////////////////// static inline void convert_bf16_to_float_(float *dst, const cutlass::float_bf16_t *src, size_t n) { for( size_t ii = 0; ii < n; ++ii ) { union { uint16_t u16[2]; uint32_t u32; } tmp; tmp.u16[0] = uint16_t(0); tmp.u16[1] = src[ii]; reinterpret_cast<uint32_t*>(dst)[ii] = tmp.u32; } } //////////////////////////////////////////////////////////////////////////////////////////////////// static inline void convert_to_float_(float *dst, const cutlass::half_t *src, size_t n) { for( size_t ii = 0; ii < n; ++ii ) { dst[ii] = __half2float(reinterpret_cast<const __half*>(src)[ii]); } } //////////////////////////////////////////////////////////////////////////////////////////////////// static inline void convert_to_float_(float *dst, const int32_t *src, size_t n) { for( size_t ii = 0; ii < n; ++ii ) { dst[ii] = (float) src[ii]; } } //////////////////////////////////////////////////////////////////////////////////////////////////// static inline void convert_to_float_(float *dst, const int8_t *src, size_t n) { for( size_t ii = 0; ii < n; ++ii ) { dst[ii] = (float) (int32_t) src[ii]; } } //////////////////////////////////////////////////////////////////////////////////////////////////// static inline void convert_double_to_float_(float *dst, const double *src, size_t n) { for( size_t ii = 0; ii < n; ++ii ) { dst[ii] = src[ii]; } } //////////////////////////////////////////////////////////////////////////////////////////////////// static inline void convert_to_float(float *dst, const void *src, size_t n, Data_type dtype) { switch( dtype ) { case DATA_TYPE_BF16: convert_bf16_to_float_(dst, reinterpret_cast<const cutlass::float_bf16_t*>(dst), n); break; case DATA_TYPE_TF32: case DATA_TYPE_FP32: memcpy(dst, src, n*sizeof(float)); break; 
case DATA_TYPE_FP16: convert_to_float_(dst, reinterpret_cast<const cutlass::half_t*>(src), n); break; case DATA_TYPE_INT32: convert_to_float_(dst, reinterpret_cast<const int32_t*>(src), n); break; case DATA_TYPE_INT8: convert_to_float_(dst, reinterpret_cast<const int8_t*>(src), n); break; case DATA_TYPE_INT8x32: convert_to_float_(dst, reinterpret_cast<const int8_t*>(src), n); break; case DATA_TYPE_FP64: convert_double_to_float_(dst, reinterpret_cast<const double*>(src), n); break; default: assert(false); // Not implemented! } } //////////////////////////////////////////////////////////////////////////////////////////////////// // // G E M M S // //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Params > static inline void gemm(float *d, const float *a, const float *b, const float *c, const Params &params) { // Use floats for alpha/beta. float alpha = (float) params.alpha_[0], beta = (float) params.beta_[0]; // #pragma omp parallel for for( int ni = 0; ni < params.n_; ++ni ) { for( int mi = 0; mi < params.m_; ++mi ) { float sum = 0.f; for( int ki = 0; ki < params.k_; ++ki ) { // The offsets for A and B. size_t a_offset = params.ta_ ? (size_t) mi*params.lda_ + ki : (size_t) ki*params.lda_ + mi; size_t b_offset = params.tb_ ? (size_t) ki*params.ldb_ + ni : (size_t) ni*params.ldb_ + ki; // Read the elements. float x = a[a_offset]; float y = b[b_offset]; // Update the sum. sum += x * y; // if (ni==0 && mi==0) { // printf("ki=%d y=%1.0f %1.0f\n",ki,x,sum); // } } // ki // Update the result. 
float z = 0.f; if( beta != 0.f ) { z = c[(size_t) ni*params.ldc_ + mi]; } d[(size_t) ni*params.ldd_ + mi] = alpha * sum + beta * z; } // mi } // ni } //////////////////////////////////////////////////////////////////////////////////////////////////// // // C O N V O L U T I O N S // //////////////////////////////////////////////////////////////////////////////////////////////////// struct Identity_functor { inline float operator()(int, int, int, int, int, float val) const { return val; } }; //////////////////////////////////////////////////////////////////////////////////////////////////// struct Fprop_bias_functor { // Ctor. Fprop_bias_functor(const float *bias) : bias_(bias) { } // Add the bias and apply relu. inline float operator()(int, int, int, int, int ki, float val) const { return val + bias_[ki]; } // The bias tensor. const float *bias_; }; //////////////////////////////////////////////////////////////////////////////////////////////////// struct Fprop_bias_and_relu_functor : public Fprop_bias_functor { // Ctor. Fprop_bias_and_relu_functor(const float *bias) : Fprop_bias_functor(bias) { } // Add the bias and apply relu. 
inline float operator()(int ni, int oi, int pi, int qi, int ki, float val) const { return fmaxf(Fprop_bias_functor::operator()(ni, oi, pi, qi, ki, val), 0.f); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Params > static inline void print_converted_inputs(const float* act, const float* flt, const Params &params) { float* converted_act = (float*) malloc(params.n_ * params.c_ * params.h_ * params.h_); float* converted_flt = (float*) malloc(params.k_ * params.c_ * params.r_ * params.s_); for (int ni=0; ni < params.n_; ni++) { for (int ci=0; ci < params.c_ / 32; ci++) { for (int hi=0; hi < params.h_; hi++) { for (int wi=0; wi < params.w_; wi++) { for (int i=0; i < 32; i++) { int act_idx = ni * params.c_ * params.h_ * params.w_ + ci * params.h_ * params.w_ * 32 + hi * params.w_ * 32 + wi * 32 + i; int converted_act_idx = ni * params.h_ * params.w_ * params.c_ + hi * params.w_ * params.c_; wi * params.c_ + ci * 32 + i; converted_act[converted_act_idx] = act[act_idx]; } } } } } for (int ni=0; ni < params.n_; ni++) { for (int hi=0; hi < params.h_; hi++) { for (int wi=0; wi < params.w_; wi++) { for (int ci=0; ci < params.c_; ci++) { int idx = ni * params.h_ * params.w_ * params.c_ + hi * params.w_ * params.c_; wi * params.c_ + ci; printf("%f ", converted_act[idx]); } printf("\n"); } } } for (int ki=0; ki < params.k_; ki++) { for (int ci=0; ci < params.c_ / 32; ci++) { for (int ri=0; ri < params.r_; ri++) { for (int si=0; si < params.s_; si++) { for (int i=0; i < 32; i++) { int flt_idx = ki * params.c_ * params.r_ * params.s_ + ci * params.r_ * params.s_ * 32 + ri * params.s_ * 32 + si * 32 + i; int converted_flt_idx = ki * params.r_ * params.s_ * params.c_ + ri * params.s_ * params.c_; si * params.c_ + ci * 32 + i; converted_flt[converted_flt_idx] = flt[flt_idx]; } } } } } for (int ki=0; ki < params.k_; ki++) { for (int ri=0; ri < params.r_; ri++) { for (int si=0; si < params.s_; si++) { for (int 
ci=0; ci < params.c_; ci++) { int idx = ki * params.r_ * params.s_ * params.c_ + ri * params.s_ * params.c_; si * params.c_ + ci; printf("%f ", converted_flt[idx]); } printf("\n"); } } } free(converted_act); free(converted_flt); } template< typename Params > static inline void fprop_interleaved(float *out, const float *act, const float *flt, const Params &params) { print_converted_inputs(act, flt, params); // Use floats for alpha/beta. float alpha = (float) params.alpha_[0], beta = (float) params.beta_[0]; #pragma omp parallel for for( int ni = 0; ni < params.n_; ++ni ) { for( int pi = 0; pi < params.p_; ++pi ) { for( int qi = 0; qi < params.q_; ++qi ) { for( int ki = 0; ki < params.k_; ++ki ) { float sum = 0.f; for( int ri = 0; ri < params.r_; ++ri ) { for( int si = 0; si < params.s_; ++si ) { for( int ci = 0; ci < params.c_ ; ++ci ) { // The filter shift. int rj = ri; int sj = si; // Deal with convolution. if( !params.is_cross_correlation_ ) { rj = params.r_ - ri - 1; sj = params.s_ - si - 1; } // The coordinates of the pixel in the image. int hi = pi * params.stride_h_ + rj * params.dilation_h_ - params.pad_h_; int wi = qi*params.stride_w_ + sj*params.dilation_w_ - params.pad_w_; // Is the pixel in the image? bool is_in_image = (unsigned) hi < (unsigned) params.h_ && (unsigned) wi < (unsigned) params.w_; // Deal with convolution. if( !params.is_cross_correlation_ ) { rj = params.r_ - ri - 1; sj = params.s_ - si - 1; } // The offsets. (assuming NC/32HW32) int act_offset = (size_t) ni * params.h_ * params.w_ * params.c_ + int(ci / 32) * params.h_ * params.w_ * 32 + hi * params.w_ * 32 + wi * 32 + ci % 32; size_t flt_offset = (size_t) ki * params.r_ * params.s_ * params.c_ + int(ci / 32) * params.r_ * params.s_ * 32 + ri * params.s_ * 32 + si * 32 + ci % 32; //printf("n, p, q, k, r, s, c = (%d %d %d %d %d %d %d) ; act offset = %d \n", ni, pi, qi, ki, ri, si, ci, act_offset); // The two values. float a = is_in_image ? act[act_offset] : 0.f; float b = is_in_image ? 
flt[flt_offset] : 0.f; // Update the output value. sum += a * b; } // ci } // si } // ri // Store the output value for real. size_t out_offset = (size_t) ni * params.p_ * params.q_ * params.k_ + int(ki / 32) * params.p_ * params.q_ * 32 + pi * params.q_ * 32 + qi * 32 + ki % 32; // Update the value. float val; if( beta != 0.f ) { val = alpha * sum + beta * out[out_offset]; } else { val = alpha * sum; } // Store the value. out[out_offset] = val; } // ki } // qi } // pi } // ni } //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Params, typename Functor = Identity_functor > static inline void fprop_ndhwc(float *out, const float *act, const float *flt, const Params &params, const bool is_img_ncdhw = false, const bool is_out_ncdhw = false, const Functor &fct = Functor(), Data_type dtype_out = DATA_TYPE_FP32) { // Use floats for alpha/beta. float alpha = (float) params.alpha_[0], beta = (float) params.beta_[0]; #pragma omp parallel for for( int ni = 0; ni < params.n_; ++ni ) { for( int gi = 0; gi < params.g_; ++gi ) { for( int oi = 0; oi < params.o_; ++oi ) { for( int pi = 0; pi < params.p_; ++pi ) { for( int qi = 0; qi < params.q_; ++qi ) { for( int ki = 0; ki < (params.k_ / params.g_); ++ki ) { float sum = 0.f; for( int ti = 0; ti < params.t_; ++ti ) { for( int ri = 0; ri < params.r_; ++ri ) { for( int si = 0; si < params.s_; ++si ) { for( int ci = 0; ci < params.c_ / params.g_; ++ci ) { // The filter shift. int tj = ti; int rj = ri; int sj = si; // Deal with convolution. if( !params.is_cross_correlation_ ) { tj = params.t_ - ti - 1; rj = params.r_ - ri - 1; sj = params.s_ - si - 1; } // The coordinates of the pixel in the image. int di = oi*params.stride_d_ + tj*params.dilation_d_ - params.pad_d_; int hi = pi*params.stride_h_ + rj*params.dilation_h_ - params.pad_h_; int wi = qi*params.stride_w_ + sj*params.dilation_w_ - params.pad_w_; // Is the pixel in the image? 
bool is_in_image = (unsigned) di < (unsigned) params.d_ && (unsigned) hi < (unsigned) params.h_ && (unsigned) wi < (unsigned) params.w_; // The offsets. size_t act_offset = is_img_ncdhw ? (size_t) ni*params.d_*params.h_*params.w_*params.c_ + (gi * (params.c_ / params.g_) + ci)*params.d_*params.h_*params.w_ + di*params.h_*params.w_ + hi*params.w_ + wi : (size_t) ni*params.d_*params.h_*params.w_*params.c_ + di*params.h_*params.w_*params.c_ + hi*params.w_*params.c_ + wi*params.c_ + (gi * (params.c_ / params.g_) + ci); size_t flt_offset = (size_t) gi * (params.k_ / params.g_)*params.t_*params.r_*params.s_*(params.c_ / params.g_) + ki*params.t_*params.r_*params.s_*(params.c_ / params.g_) + ti*params.r_*params.s_*(params.c_ / params.g_) + ri*params.s_*(params.c_ / params.g_) + si*(params.c_ / params.g_) + ci; // The two values. float a = is_in_image ? act[act_offset] : 0.f; float b = is_in_image ? flt[flt_offset] : 0.f; // Update the output value. sum += a*b; } // ci } // si } // ri } // ti // Store the output value for real. size_t out_offset = is_out_ncdhw ? (size_t) ni*params.o_*params.p_*params.q_*params.k_ + (gi * (params.k_ / params.g_) + ki)*params.o_*params.p_*params.q_ + oi*params.p_*params.q_ + pi*params.q_ + qi : (size_t) ni*params.o_*params.p_*params.q_*params.k_ + oi*params.p_*params.q_*params.k_ + pi*params.q_*params.k_ + qi*params.k_ + (gi * (params.k_ / params.g_) + ki); // Update the value. float val; if( beta != 0.f ) { val = alpha * sum + beta * out[out_offset]; } else { val = alpha * sum; } if( dtype_out == DATA_TYPE_INT8) { if( val > 128 ) { val = 127; } else if( val < -128) { val = -128; } } // Store the value. 
out[out_offset] = fct(ni, oi, pi, qi, ki, val); } // ki } // qi } // pi } // oi } // gi } // ni } //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Params, typename Functor = Identity_functor > static inline void dgrad_ndhwc(float *act, const float *out, const float *flt, const Params &params, const bool is_img_ncdhw = false, const bool is_out_ncdhw = false, const Functor &fct = Functor()) { // Use floats for alpha/beta. float alpha = (float) params.alpha_[0], beta = (float) params.beta_[0]; // Clear the output as we perform scattered writes. size_t ndhwc = (size_t) params.n_ * params.d_ * params.h_ * params.w_ * params.c_; for( size_t ii = 0; ii < ndhwc; ++ii ) { act[ii] = beta != 0.f ? beta * act[ii] : 0.f; } // Do dgrad... for( int ni = 0; ni < params.n_; ++ni ) { for( int oi = 0; oi < params.o_; ++oi ) { for( int pi = 0; pi < params.p_; ++pi ) { for( int qi = 0; qi < params.q_; ++qi ) { for( int ki = 0; ki < params.k_; ++ki ) { for( int ti = 0; ti < params.t_; ++ti ) { for( int ri = 0; ri < params.r_; ++ri ) { for( int si = 0; si < params.s_; ++si ) { for( int ci = 0; ci < params.c_; ++ci ) { // The filter shift. int tj = ti; int rj = ri; int sj = si; // Deal with convolution. if( !params.is_cross_correlation_ ) { tj = params.t_ - ti - 1; rj = params.r_ - ri - 1; sj = params.s_ - si - 1; } // The coordinates of the pixel in the image. int di = oi*params.stride_d_ + tj*params.dilation_d_ - params.pad_d_; int hi = pi*params.stride_h_ + rj*params.dilation_h_ - params.pad_h_; int wi = qi*params.stride_w_ + sj*params.dilation_w_ - params.pad_w_; // Is the pixel in the image? bool is_in_image = (unsigned) di < params.d_ && (unsigned) hi < params.h_ && (unsigned) wi < params.w_; // The input offsets. size_t out_offset = is_img_ncdhw ? 
(size_t) ni*params.o_*params.p_*params.q_*params.k_ + ki*params.o_*params.p_*params.q_ + oi*params.p_*params.q_ + pi*params.q_ + qi : (size_t) ni*params.o_*params.p_*params.q_*params.k_ + oi*params.p_*params.q_*params.k_ + pi*params.q_*params.k_ + qi*params.k_ + ki; size_t flt_offset = (size_t) ki*params.t_*params.r_*params.s_*params.c_ + ti*params.r_*params.s_*params.c_ + ri*params.s_*params.c_ + si*params.c_ + ci; // The two values. float a = is_in_image ? out[out_offset] : 0.f; float b = is_in_image ? flt[flt_offset] : 0.f; // The destination offset. size_t act_offset = is_out_ncdhw ? (size_t) ni*params.d_*params.h_*params.w_*params.c_ + ci*params.d_*params.h_*params.w_ + di*params.h_*params.w_ + hi*params.w_ + wi : (size_t) ni*params.d_*params.h_*params.w_*params.c_ + di*params.h_*params.w_*params.c_ + hi*params.w_*params.c_ + wi*params.c_ + ci; // Update the gradient of the pixel. act[act_offset] += alpha * (a * b); } // ci } // si } // ri } // ti } // ki } // qi } // pi } // oi } // ni // Apply the functor. for( int ni = 0; ni < params.n_; ++ni ) { for( int di = 0; di < params.d_; ++di ) { for( int hi = 0; hi < params.h_; ++hi ) { for( int wi = 0; wi < params.w_; ++wi ) { for( int ci = 0; ci < params.c_; ++ci ) { size_t act_offset = is_out_ncdhw ? 
(size_t) ni*params.d_*params.h_*params.w_*params.c_ + ci*params.d_*params.h_*params.w_ + di*params.h_*params.w_ + hi*params.w_ + wi : (size_t) ni*params.d_*params.h_*params.w_*params.c_ + di*params.h_*params.w_*params.c_ + hi*params.w_*params.c_ + wi*params.c_ + ci; act[act_offset] = fct(ni, di, hi, wi, ci, act[act_offset]); } // ci } // wi } // hi } // di } // ni } //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Params, typename Functor = Identity_functor > static inline void wgrad_ndhwc(float *flt, const float *act, const float *out, const Params &params, const bool is_img_ncdhw = false, const bool is_out_ncdhw = false, const Functor &fct = Functor()) { // Use floats for alpha/beta. float alpha = (float) params.alpha_[0], beta = (float) params.beta_[0]; #pragma omp parallel for for( int ki = 0; ki < params.k_; ++ki ) { for( int ti = 0; ti < params.t_; ++ti ) { for( int ri = 0; ri < params.r_; ++ri ) { for( int si = 0; si < params.s_; ++si ) { for( int ci = 0; ci < params.c_; ++ci ) { float sum = 0.f; for( int ni = 0; ni < params.n_; ++ni ) { for( int oi = 0; oi < params.o_; ++oi ) { for( int pi = 0; pi < params.p_; ++pi ) { for( int qi = 0; qi < params.q_; ++qi ) { // The filter shift. int tj = ti; int rj = ri; int sj = si; // Deal with convolution. if( !params.is_cross_correlation_ ) { tj = params.t_ - ti - 1; rj = params.r_ - ri - 1; sj = params.s_ - si - 1; } // The coordinates of the pixel in the image. int di = oi*params.stride_d_ + tj*params.dilation_d_ - params.pad_d_; int hi = pi*params.stride_h_ + rj*params.dilation_h_ - params.pad_h_; int wi = qi*params.stride_w_ + sj*params.dilation_w_ - params.pad_w_; // Is the pixel in the image? bool is_in_image = (unsigned) di < params.d_ && (unsigned) hi < params.h_ && (unsigned) wi < params.w_; // The offsets. size_t act_offset = is_img_ncdhw ? 
(size_t) ni*params.d_*params.h_*params.w_*params.c_ + ci*params.d_*params.h_*params.w_ + di*params.h_*params.w_ + hi*params.w_ + wi : (size_t) ni*params.d_*params.h_*params.w_*params.c_ + di*params.h_*params.w_*params.c_ + hi*params.w_*params.c_ + wi*params.c_ + ci; size_t out_offset = is_out_ncdhw ? (size_t) ni*params.o_*params.p_*params.q_*params.k_ + ki*params.o_*params.p_*params.q_ + oi*params.p_*params.q_ + pi*params.q_ + qi : (size_t) ni*params.o_*params.p_*params.q_*params.k_ + oi*params.p_*params.q_*params.k_ + pi*params.q_*params.k_ + qi*params.k_ + ki; // The two values. float a = is_in_image ? act[act_offset] : 0.f; float b = is_in_image ? out[out_offset] : 0.f; // Update the output value. sum += a*b; } // qi } // pi } // oi } // ni // Store the output value for real. size_t flt_offset = (size_t) ki*params.t_*params.r_*params.s_*params.c_ + ti*params.r_*params.s_*params.c_ + ri*params.s_*params.c_ + si*params.c_ + ci; // Update the value. float val; if( beta != 0.f ) { val = alpha * sum + beta * flt[flt_offset]; } else { val = alpha * sum; } // Apply the functor. 
flt[flt_offset] = fct(ki, ti, ri, si, ci, val); } // ci } // si } // ri } // ti } // ki } //////////////////////////////////////////////////////////////////////////////////////////////////// // // D A T A T Y P E F R O M / T O A S T R I N G // //////////////////////////////////////////////////////////////////////////////////////////////////// static inline Data_type data_type_from_string(const char *name) { if( !strcmp(name, "tf32") ) { return DATA_TYPE_TF32; } else if( !strcmp(name, "bf16") ) { return DATA_TYPE_BF16; } else if( !strcmp(name, "fp32") ) { return DATA_TYPE_FP32; } else if( !strcmp(name, "fp16") ) { return DATA_TYPE_FP16; } else if( !strcmp(name, "int32") ) { return DATA_TYPE_INT32; } else if( !strcmp(name, "int8") ) { return DATA_TYPE_INT8; } else if( !strcmp(name, "int4") ) { return DATA_TYPE_INT4; } else if( !strcmp(name, "bool") ) { return DATA_TYPE_BOOL; } else if( !strcmp(name, "fp64") ) { return DATA_TYPE_FP64; } else { assert(false); return DATA_TYPE_FP32; } } //////////////////////////////////////////////////////////////////////////////////////////////////// static inline const char* data_type_to_string(Data_type dtype) { switch( dtype ) { case DATA_TYPE_BF16: return "bf16"; case DATA_TYPE_TF32: return "tf32"; case DATA_TYPE_FP32: return "fp32"; case DATA_TYPE_FP16: return "fp16"; case DATA_TYPE_INT32: return "int32"; case DATA_TYPE_INT8: return "int8"; case DATA_TYPE_INT4: return "int4"; case DATA_TYPE_BOOL: return "bool"; case DATA_TYPE_FP64: return "fp64"; default: assert(false); return "unknown"; } } //////////////////////////////////////////////////////////////////////////////////////////////////// // // T R A N S P O S E S // //////////////////////////////////////////////////////////////////////////////////////////////////// static inline void ncdhw_to_ndhwc(float *dst, const float *src, int n, int c, int d, int h, int w) { for( int ni = 0; ni < n; ++ni ) { for( int ci = 0; ci < c; ++ci ) { for( int di = 0; di < d; ++di ) { for( int hi 
= 0; hi < h; ++hi ) { for( int wi = 0; wi < w; ++wi ) { size_t src_offset = (size_t) ni*c*d*h*w + ci*d*h*w + di*h*w + hi*w + wi; size_t dst_offset = (size_t) ni*d*h*w*c + di*h*w*c + hi*w*c + wi*c + ci; dst[dst_offset] = src[src_offset]; } // wi } // hi } // di } // ci } // ni } //////////////////////////////////////////////////////////////////////////////////////////////////// static inline void ndhwc_to_ncdhw(float *dst, const float *src, int n, int d, int h, int w, int c) { for( int ni = 0; ni < n; ++ni ) { for( int di = 0; di < d; ++di ) { for( int hi = 0; hi < h; ++hi ) { for( int wi = 0; wi < w; ++wi ) { for( int ci = 0; ci < c; ++ci ) { size_t src_offset = (size_t) ni*d*h*w*c + di*h*w*c + hi*w*c + wi*c + ci; size_t dst_offset = (size_t) ni*c*d*h*w + ci*d*h*w + di*h*w + hi*w + wi; dst[dst_offset] = src[src_offset]; } // ci } // wi } // hi } // di } // ni } //////////////////////////////////////////////////////////////////////////////////////////////////// // // R A N D O M I N I T I A L I Z A T I O N // //////////////////////////////////////////////////////////////////////////////////////////////////// static inline void random_init(float *dst, size_t n, int range, float scale, bool use_1s, bool verbose) { if( verbose ) { printf("Range.........: %d\n", range); printf("Scale.........: %f\n", scale); printf("Use.1s........: %s\n", use_1s ? 
"true" : "false"); printf("Address.......: 0x%016lx\n", (size_t) dst); printf("Values........: "); } for( size_t ii = 0; ii < n; ++ii ) { float x = 1.f; if( !use_1s ) { x = (float) (rand() % range - range / 2) * scale; //x = (float) (ii % 32); } if( verbose && ii < 8 ) { printf("%.3f ", x); } dst[ii] = x; } if( verbose ) { printf("...\n"); } } //////////////////////////////////////////////////////////////////////////////////////////////////// // // S I Z E O F A D A T A T Y P E // //////////////////////////////////////////////////////////////////////////////////////////////////// static inline size_t get_size_in_bytes(size_t n, Data_type dtype) { switch( dtype ) { case DATA_TYPE_FP64: return n * 8; case DATA_TYPE_TF32: return n * 4; case DATA_TYPE_FP32: return n * 4; case DATA_TYPE_FP16: return n * 2; case DATA_TYPE_INT32: return n * 4; case DATA_TYPE_INT8: return n; case DATA_TYPE_INT4: return n / 2; case DATA_TYPE_BOOL: return n / 8; case DATA_TYPE_BF16: return n * 2; case DATA_TYPE_INT8x32: return n; default: assert(false); return 0; } } //////////////////////////////////////////////////////////////////////////////////////////////////// static inline int elements_per_ldg(int size_in_bytes, Data_type dtype) { switch( dtype ) { case DATA_TYPE_FP64: return size_in_bytes / 8; case DATA_TYPE_TF32: return size_in_bytes / 4; case DATA_TYPE_BF16: return size_in_bytes / 2; case DATA_TYPE_FP32: return size_in_bytes / 4; case DATA_TYPE_FP16: return size_in_bytes / 2; case DATA_TYPE_INT32: return size_in_bytes / 4; case DATA_TYPE_INT8: return size_in_bytes; case DATA_TYPE_INT4: return size_in_bytes * 2; case DATA_TYPE_BOOL: return size_in_bytes * 8; default: assert(false); return 0; } } //////////////////////////////////////////////////////////////////////////////////////////////////// // // C U D A C O P I E S // //////////////////////////////////////////////////////////////////////////////////////////////////// static inline cudaError cuda_memcpy_h2d(void *dst, const 
float *src, size_t n, Data_type dtype) { size_t sz = get_size_in_bytes(n, dtype); void *tmp = malloc(sz); convert_from_float(tmp, src, n, dtype); cudaError err = cudaMemcpy(dst, tmp, sz, cudaMemcpyHostToDevice); free(tmp); return err; } //////////////////////////////////////////////////////////////////////////////////////////////////// static inline cudaError cuda_memcpy_d2h(float *dst, const void *src, size_t n, Data_type dtype) { size_t sz = get_size_in_bytes(n, dtype); void *tmp = malloc(sz); cudaError err = cudaMemcpy(tmp, src, sz, cudaMemcpyDeviceToHost); convert_to_float(dst, tmp, n, dtype); free(tmp); return err; } //////////////////////////////////////////////////////////////////////////////////////////////////// static inline cudaError cuda_memcpy_to_ncdhw_h2d(void *dst, const float *src, int n, int d, int h, int w, int c, Data_type dtype) { size_t ndhwc = (size_t) n * d * h * w * c; float *tmp = (float*) malloc(ndhwc * sizeof(float)); ndhwc_to_ncdhw(tmp, src, n, d, h, w, c); cudaError err = cuda_memcpy_h2d(dst, tmp, ndhwc, dtype); free(tmp); return err; } //////////////////////////////////////////////////////////////////////////////////////////////////// #endif // SAMPLES
huffcode.c
/*
 * huffcode - Encode/Decode files using Huffman encoding.
 * http://huffman.sourceforge.net
 * Copyright (C) 2003 Douglas Ryan Richardson
 */

#include "huffman.h"
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdlib.h>
#include <assert.h>
#include <mpi.h>

#ifdef WIN32
#include <malloc.h>
extern int getopt(int, char**, char*);
extern char* optarg;
#else
#include <unistd.h>
#endif

#define THREADS 4

static unsigned int memory_encode_read_file(FILE *in, unsigned char **buf,
                                            unsigned long sz);
static unsigned int memory_decode_read_file(FILE *in, unsigned char **buf,
                                            unsigned long sz);

/* Print the program version banner to the given stream. */
static void version(FILE *out)
{
	fputs("huffcode 0.3\n"
	      "Copyright (C) 2003 Douglas Ryan Richardson"
	      "; Gauss Interprise, Inc\n", out);
}

/* Print command-line usage to the given stream. */
static void usage(FILE* out)
{
	fputs("Usage: huffcode [-i<input file>] [-o<output file>] [-d|-c]\n"
	      "-i - input file (default is standard input)\n"
	      "-o - output file (default is standard output)\n"
	      "-d - decompress\n"
	      "-c - compress (default)\n"
	      "-m - read file into memory, compress, then write to file (not default)\n",
	      out);
}

/*
 * MPI driver: every rank reads its chunk of the input, rank 0 gathers the
 * chunks back into one buffer, the buffer is broadcast, and all ranks take
 * part in huffman_encode_memory(); rank 0 writes the encoded result.
 */
int main(int argc, char** argv)
{
	unsigned char *buf = NULL;
	char memory = 1;
	char compress = 1;
	int opt;
	unsigned int i, j, cur;
	const char *file_in = NULL, *file_out = NULL;
	unsigned char* bufout = NULL;
	unsigned int bufoutlen = 0;
	int rank = -1, nTasks = -1;
	int ret = 0;

	/* Initialize MPI execution environment. */
	MPI_Init(&argc, &argv);
	/* Determines the rank/ID of the current task. */
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	/* Gives the number of tasks. */
	MPI_Comm_size(MPI_COMM_WORLD, &nTasks);

	FILE *out = stdout;

	/* Get the command line arguments. */
	while ((opt = getopt(argc, argv, "i:o:cdhvm")) != -1) {
		switch (opt) {
		case 'i':
			file_in = optarg;
			break;
		case 'o':
			file_out = optarg;
			break;
		case 'c':
			compress = 1;
			break;
		case 'd':
			compress = 0;
			break;
		case 'm':
			/* Was missing: -m previously fell through to usage()
			 * and aborted even though the option is documented. */
			memory = 1;
			break;
		case 'h':
			usage(stdout);
			return 0;
		case 'v':
			version(stdout);
			return 0;
		default:
			usage(stderr);
			return 1;
		}
	}

	/* Default to standard input as usage() promises; the original left fp
	 * uninitialized (UB at the fseek below) when no -i was given. */
	FILE *fp = stdin;
	if (file_in) {
		fp = fopen(file_in, "rb");
		if (!fp) {
			fprintf(stderr, "Can't open input file '%s': %s\n",
			        file_in, strerror(errno));
			exit(1);
		}
	}

	/* If an output file is given then create it. */
	if (file_out) {
		out = fopen(file_out, "wb");
		if (!out) {
			fprintf(stderr, "Can't open output file '%s': %s\n",
			        file_out, strerror(errno));
			return 1;
		}
	}

	/* Rank 0 measures the input size; everyone learns it via broadcast. */
	unsigned long sz = 0;
	int to_read[nTasks];
	int displs[nTasks];
	if (rank == 0) {
		fseek(fp, 0L, SEEK_END);
		sz = (unsigned long)ftell(fp);
		fseek(fp, 0L, SEEK_SET);
	}
	MPI_Bcast(&sz, 1, MPI_UNSIGNED_LONG, 0, MPI_COMM_WORLD);

	/* Split sz into nTasks chunks; the last rank absorbs the remainder.
	 * displs[i] is the byte offset of rank i's chunk within the file. */
	for (i = 0; i < (unsigned int)nTasks; ++i) {
		if (i == (unsigned int)(nTasks - 1)) {
			to_read[nTasks - 1] = sz - (nTasks - 1) * (sz / nTasks);
		} else {
			to_read[i] = sz / nTasks;
		}
		displs[i] = 0;
		for (j = 0; j < i; ++j) {
			displs[i] += to_read[j];
		}
	}

	/* Position each rank's file pointer at the start of its chunk. */
	fseek(fp, rank * (unsigned long)(sz / nTasks), SEEK_SET);

	if (memory) {
		if (compress) {
			cur = memory_encode_read_file(fp, &buf, to_read[rank]);
			(void)cur; /* bytes actually read; chunk sizes come from to_read[] */

			/* Reassemble the full input on rank 0, then share it so
			 * every rank can participate in the encode. */
			char *text = malloc(sz * sizeof(char));
			if (!text) {
				fprintf(stderr, "Out of memory\n");
				ret = 1;
			} else {
				MPI_Gatherv(buf, to_read[rank], MPI_CHAR, text,
				            to_read, displs, MPI_CHAR, 0,
				            MPI_COMM_WORLD);
				MPI_Bcast(text, sz, MPI_CHAR, 0, MPI_COMM_WORLD);

				/* Collective Huffman encode across all ranks. */
				if (huffman_encode_memory(text, sz, &bufout,
				                          &bufoutlen, rank, nTasks,
				                          MPI_COMM_WORLD)) {
					ret = 1;
				} else if (rank == 0) {
					/* Only rank 0 holds/writes the result. */
					if (fwrite(bufout, 1, bufoutlen, out) != bufoutlen)
						ret = 1;
					free(bufout);
				}
				free(text);
			}
			free(buf);
		} else {
			/* The pre-MPI (OpenMP) decode path was removed; this
			 * build cannot decompress yet. */
			fprintf(stderr, "huffcode: decompression is not implemented "
			        "in this MPI build\n");
			ret = 1;
		}
	}

	if (file_in)
		fclose(fp);
	if (file_out)
		fclose(out);

	MPI_Finalize();
	return ret;
}

/*
 * Read sz bytes from 'in' into *buf, growing it in 1024-byte steps.
 * Returns the number of bytes read, or (unsigned)-1 on allocation failure
 * (*buf is freed and reset to NULL in that case).
 */
static unsigned int memory_encode_read_file(FILE *in, unsigned char **buf,
                                            unsigned long sz)
{
	unsigned int i, len = 0, cur = 0, inc = 1024;

	assert(in);

	for (i = 0; i < (unsigned int)sz; i += inc) {
		unsigned char *tmp;
		/* len may overshoot sz by up to inc-1 bytes; harmless. */
		len += inc;
		tmp = (unsigned char*)realloc(*buf, len);
		if (!tmp) {
			/* Was free(buf): that freed the address of the caller's
			 * pointer variable, not the buffer — undefined behavior. */
			free(*buf);
			*buf = NULL;
			return -1;
		}
		*buf = tmp;
		if (cur + inc > sz) {
			cur += fread(*buf + cur, 1, (unsigned int)(sz - cur), in);
		} else {
			cur += fread(*buf + cur, 1, inc, in);
		}
	}

	if (NULL != *buf)
		return cur;
	return -1;
}

/*
 * Same reader as memory_encode_read_file, kept separate for the (currently
 * disabled) decode path. Returns bytes read, or (unsigned)-1 on failure.
 */
static unsigned int memory_decode_read_file(FILE *in, unsigned char **buf,
                                            unsigned long sz)
{
	unsigned int i, len = 0, cur = 0, inc = 1024;

	assert(in);

	/* Read the file into memory. */
	for (i = 0; i < (unsigned int)sz; i += inc) {
		unsigned char *tmp;
		len += inc;
		tmp = (unsigned char*)realloc(*buf, len);
		if (!tmp) {
			free(*buf);
			*buf = NULL;
			/* Was 'return 1' — now the same -1 sentinel as the
			 * encode-side reader for consistency. */
			return -1;
		}
		*buf = tmp;
		if (cur + inc > sz) {
			cur += fread(*buf + cur, 1, (unsigned int)(sz - cur), in);
		} else {
			cur += fread(*buf + cur, 1, inc, in);
		}
	}

	if (NULL != *buf)
		return cur;
	return -1;
}
deconvolution_pack1ton_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Deconvolution (transposed convolution), pack1-to-packN layout, fp16 storage
// with fp32 accumulation (widening multiply-accumulate). packn lanes (one RVV
// e16m1 vector) of output channels are produced per output pixel.
// For each output pixel (i, j) the kernel gathers the input positions that
// would have written to it in a forward convolution: solving
// i == sy*stride_h + y*dilation_h - (kernel_extent_h-1) for integer sy,
// and analogously for sx.
static void deconvolution_pack1ton_fp16s_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    // packn = number of 16-bit lanes in one vector register group.
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // Effective kernel footprint once dilation is applied.
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // fp32 accumulator (m2 because of the widening macc below),
                // seeded with the bias for this packn-group when present.
                vfloat32m2_t _sum = vfmv_v_f_f32m2(0.f, vl);

                if (bias_data_ptr)
                {
                    _sum = vle32_v_f32m2(bias_data_ptr + p * packn, vl);
                }

                // Weight block for output-channel group p: maxk * channels
                // vectors of packn fp16 weights.
                const __fp16* kptr = (const __fp16*)weight_data_fp16 + maxk * channels * p * packn;

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        // Candidate source row (scaled by stride); skip if it
                        // does not land exactly on a stride multiple.
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        const __fp16* sptr = m.row<const __fp16>(sy);

                        for (int x = 0; x < kernel_w; x++)
                        {
                            // Same mapping for the source column.
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            __fp16 val = sptr[sx];

                            int k = y * kernel_w + x;

                            // Widening FMA: fp16 scalar * fp16 vector into the
                            // fp32 accumulator.
                            vfloat16m1_t _w = vle16_v_f16m1(kptr + k * packn, vl);
                            _sum = vfwmacc_vf_f32m2(_sum, val, _w, vl);
                        }
                    }

                    kptr += maxk * packn;
                }

                _sum = activation_ps(_sum, activation_type, activation_params, vl);

                // Narrow fp32 -> fp16 on store.
                vse16_v_f16m1(outptr + j * packn, vfncvt_f_f_w_f16m1(_sum, vl), vl);
            }

            outptr += outw * packn;
        }
    }
}

// Same deconvolution as above, but with fp16 arithmetic end-to-end
// ("fp16sa": fp16 storage AND fp16 accumulation) and an fp16 bias table.
static void deconvolution_pack1ton_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const __fp16* bias_data_ptr = bias_data_fp16;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // fp16 accumulator, seeded with the fp16 bias when present.
                vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

                if (bias_data_ptr)
                {
                    _sum = vle16_v_f16m1(bias_data_ptr + p * packn, vl);
                }

                const __fp16* kptr = (const __fp16*)weight_data_fp16 + maxk * channels * p * packn;

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        const __fp16* sptr = m.row<const __fp16>(sy);

                        for (int x = 0; x < kernel_w; x++)
                        {
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            __fp16 val = sptr[sx];

                            int k = y * kernel_w + x;

                            // Non-widening FMA: accumulate entirely in fp16.
                            vfloat16m1_t _w = vle16_v_f16m1(kptr + k * packn, vl);
                            _sum = vfmacc_vf_f16m1(_sum, val, _w, vl);
                        }
                    }

                    kptr += maxk * packn;
                }

                _sum = activation_ps(_sum, activation_type, activation_params, vl);

                vse16_v_f16m1(outptr + j * packn, _sum, vl);
            }

            outptr += outw * packn;
        }
    }
}
opencl_blockchain_fmt_plug.c
/* blockchain "My Wallet" cracker patch for JtR. Hacked together during June of * 2013 by Dhiru Kholia <dhiru at openwall.com>. * * See https://blockchain.info/wallet/wallet-format * This software is Copyright (c) 2012 Lukas Odzioba <ukasz at openwall.net> * and Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com>, and it is * hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * improved dection, added iteration count and handle v2 hashes, Feb, 2015, JimF. */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_blockchain; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_blockchain); #else #include <string.h> #include "aes.h" #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "formats.h" #include "common.h" #include "stdint.h" #include "jumbo.h" #include "common-opencl.h" #include "options.h" #define FORMAT_LABEL "blockchain-opencl" #define FORMAT_NAME "blockchain My Wallet" #define FORMAT_TAG "$blockchain$" #define TAG_LENGTH 12 #define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL AES" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define BINARY_SIZE 0 #define PLAINTEXT_LENGTH 64 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN MEM_ALIGN_WORD #define SALT_ALIGN 4 #define BIG_ENOUGH (8192 * 32) // increase me (in multiples of 16) to increase the decrypted and search area #define SAFETY_FACTOR 160 typedef struct { uint32_t length; uint8_t v[PLAINTEXT_LENGTH]; } blockchain_password; typedef struct { uint32_t v[32/4]; } blockchain_hash; typedef struct { int iterations; int outlen; uint8_t length; uint8_t salt[16]; } blockchain_salt; static int *cracked; static int any_cracked; static struct fmt_tests blockchain_tests[] = { 
{"$blockchain$400$53741f25a90ef521c90bb2fd73673e64089ff2cca6ba3cbf6f34e0f80f960b2f60b9ac48df009dc30c288dcf1ade5f16c70a3536403fc11a68f242ba5ad3fcceae3ca5ecd23905997474260aa1357fc322b1434ffa026ba6ad33707c9ad5260e7230b87d8888a45ddc27513adb30af8755ec0737963ae6bb281318c48f224e9c748f6697f75f63f718bebb3401d6d5f02cf62b1701c205762c2f43119b68771ed10ddab79b5f74f56d611f61f77b8b65b5b5669756017429633118b8e5b8b638667e44154de4cc76468c4200eeebda2711a65333a7e3c423c8241e219cdca5ac47c0d4479444241fa27da20dba1a1d81e778a037d40d33ddea7c39e6d02461d97185f66a73deedff39bc53af0e9b04a3d7bf43648303c9f652d99630cd0789819376d68443c85f0eeb7af7c83eecddf25ea912f7721e3fb73ccaedf860f0f033ffc990ed73db441220d0cbe6e029676fef264dc2dc497f39bedf4041ba355d086134744d5a36e09515d230cd499eb20e0c574fb1bd9d994ce26f53f21d06dd58db4f8e0efbcaee7038df793bbb3daa96", "strongpassword"}, {"$blockchain$384$ece598c58b22a3b245a02039ce36bdf589a86b6344e802b4a3ac9b727cc0b6977e9509bc1ac4d1b7b9cbf9089ecdc89706f0a469325f7ee218b2212b6cd3e32677be20eee91e267fe13ebded02946d4ae1163ef22b3dca327d7390091247ac770288a0c7be181b21a48a8f945d9913cdfdc4cfd739ee3a41ced11cacde22e3233250e36f8b8fb4d81de5298a84374af75b88afda3438eed232e52aa0eb29e0d475456c86ae9d1aaadca14bc25f273c93fd4d7fd8316ed5306733bca77e8214277edd3155342abe0710985dc20b4f80e6620e386aa7658f92df25c7c932f0eb1beca25253662bd558647a3ba741f89450bfdba59a0c016477450fbcecd62226626e06ed2e3f5a4180e32d534c7769bcd1160aad840cfd3b7b13a90d34fedb3408fe74379a9e8a840fe3bfee8e0ee01f77ee389613fa750c3d2771b83eeb4e16598f76c15c311c325bd5d54543571aa20934060e332f451e58d67ad0f4635c0c021fa76821a68d64f1a5fb6fd70365eef4442cedcc91eb8696d52d078807edd89d", "qwertyuiop1"}, /* here is a v2 hash. 
NOTE, it uses 5000 pbkdf2 for the hash */ {"$blockchain$v2$5000$544$9a4d5157d4969636b2fe0738f77a376feda2fb979738c5cf0e712f5d4a2f001608824a865d25041bc85e0ad35985999fcfae7d218eb109a703781f57e7b5a03c29ffdfb756ec8ee38ed8941b056922cdd174c8e89feb40e1a0e1766792845f57992ae9d7667eff41a5e5580f3f289b050d76cc0f049cbd30c675efc3a553c0f19f30cb9589c7c3773dd095de92a991963789408351f543c1dc307751e5f781c278da77270035e3743df01ab4e41155b6437d9c7e64388a28f8331aca4822e6b89cdd5f45061b99768218d853a3575bbd029564826bcb188d55444273cda588d4e593fc5d29696713d747cfc8302a3e9c9dbb1bb3754c2e00f28b69d8faeb2e45c04085359c6a9b6bfecfd0a6a8f27ad647b6bfd498f2224a8c0442f7fe730656263ac2869923b296ad9955dbad515b4f88ad33619bdacc33ae7f14c65fce029e0f9e4a9c414716d9a23e4361aa264493bb6fc9a7fda82599b0232174b9fc92a1c717ca2cc6deb8bd6aaf3706b95fdfdc582316cb3d271178dafe3a6704a918e07be057bef676bb144840c7f26676f183f2744fc2fe22c9c3feb7461b4383981c00b6fff403fef578f6e5464dc2d0bcb7b8d0dc2e7add502b34c8fe9f9b638eebe7ede25e351b17ea8b8c1f5213b69780c0ba7ef3d5734c0635e9d2ee49524914f047d45536180be25e7610db809db694ceeb16a3bfd8abd5ab0cda4415203408387698fe707568566f7f567164707091a806ac2d11b9b9dd0c3c991ff037f457", "Openwall1234#"}, /* this is the 'raw' hash to the line above. We do not handle this yet, but probably should. 
It is also mime, and not base-16 */ //{"{\"pbkdf2_iterations\":5000,\"version\":2,\"payload\":\"mk1RV9SWljay/gc493o3b+2i+5eXOMXPDnEvXUovABYIgkqGXSUEG8heCtNZhZmfz659IY6xCacDeB9X57WgPCn/37dW7I7jjtiUGwVpIs3RdMjon+tA4aDhdmeShF9XmSrp12Z+/0Gl5VgPPyibBQ12zA8EnL0wxnXvw6VTwPGfMMuVicfDdz3Qld6SqZGWN4lAg1H1Q8HcMHdR5feBwnjadycANeN0PfAatOQRVbZDfZx+ZDiKKPgzGspIIua4nN1fRQYbmXaCGNhTo1dbvQKVZIJryxiNVURCc82liNTlk/xdKWlnE9dHz8gwKj6cnbsbs3VMLgDyi2nY+usuRcBAhTWcaptr/s/QpqjyetZHtr/UmPIiSowEQvf+cwZWJjrChpkjspatmVXbrVFbT4itM2Gb2swzrn8Uxl/OAp4PnkqcQUcW2aI+Q2GqJkSTu2/Jp/2oJZmwIyF0ufySoccXyizG3ri9aq83Brlf39xYIxbLPScReNr+OmcEqRjge+BXvvZ2uxRIQMfyZnbxg/J0T8L+IsnD/rdGG0ODmBwAtv/0A/71ePblRk3C0Ly3uNDcLnrdUCs0yP6fm2OO6+ft4l41Gxfqi4wfUhO2l4DAun7z1XNMBjXp0u5JUkkU8EfUVTYYC+JedhDbgJ22lM7rFqO/2KvVqwzaRBUgNAg4dpj+cHVoVm9/VnFkcHCRqAasLRG5ud0MPJkf8Df0Vw==\"}", "Openwall1234#"}, {NULL} }; static struct custom_salt { unsigned char data[BIG_ENOUGH]; int length; int iter; } *cur_salt; static cl_int cl_error; static blockchain_password *inbuffer; static blockchain_hash *outbuffer; static blockchain_salt currentsalt; static cl_mem mem_in, mem_out, mem_setting; static struct fmt_main *self; size_t insize, outsize, settingsize, cracked_size; #define STEP 0 #define SEED 256 // This file contains auto-tuning routine(s). Has to be included after formats definitions. 
#include "opencl-autotune.h"
#include "memdbg.h"

/* Phase labels printed by the autotuner for the three profiled steps below. */
static const char * warn[] = {
	"xfer: ",  ", crypt: ",  ", xfer: "
};

/* ------- Helper functions ------- */
static size_t get_task_max_work_group_size()
{
	return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}

/* Allocate host buffers and OpenCL device buffers for gws keys, and bind the
 * device buffers to the kernel's arguments. Called (re-)entrantly by the
 * autotuner with candidate global work sizes. */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	insize = sizeof(blockchain_password) * gws;
	outsize = sizeof(blockchain_hash) * gws;
	settingsize = sizeof(blockchain_salt);
	cracked_size = sizeof(*cracked) * gws;

	inbuffer = mem_calloc(1, insize);
	outbuffer = mem_alloc(outsize);
	cracked = mem_calloc(1, cracked_size);

	/// Allocate memory
	mem_in =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
	    &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
	    NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out =
	    clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
	    &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
		&mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
		&mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
		&mem_setting), "Error while setting mem_salt kernel argument");
}

/* Release everything create_clobj() made. The 'if (cracked)' guard makes this
 * safe to call when no allocation round has happened yet. */
static void release_clobj(void)
{
	if (cracked) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

		MEM_FREE(inbuffer);
		MEM_FREE(outbuffer);
		MEM_FREE(cracked);
	}
}

/* Format teardown: undo reset()'s kernel/program creation once per autotune. */
static void done(void)
{
	if (autotuned) {
		release_clobj();

		HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");

		autotuned--;
	}
}

/* Format init: remember self and prepare the chosen OpenCL device. */
static void init(struct fmt_main *_self)
{
	self = _self;
	opencl_prepare_dev(gpu_id);
}

/* Build the PBKDF2-SHA1 kernel (first call only) and run the autotuner. */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		snprintf(build_opts, sizeof(build_opts),
		         "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
		         PLAINTEXT_LENGTH,
		         (int)sizeof(currentsalt.salt),
		         (int)sizeof(outbuffer->v));
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
		            gpu_id, build_opts);

		crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self,
		                       create_clobj, release_clobj,
		                       sizeof(blockchain_password), 0, db);

		// Auto tune execution from shared/included code.
		autotune_run(self, 1, 0, 1000);
	}
}

/* Syntax-check a candidate hash line: "$blockchain$[v2$iter$]len$hexdata",
 * where hexdata must be exactly len bytes of hex. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	int len;

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += TAG_LENGTH;
	if ((p = strtokm(ctcopy, "$")) == NULL)
		goto err;
	if (!strcmp(p, "v2")) {
		/* v2 hashes carry an explicit iteration count field. */
		if ((p = strtokm(NULL, "$")) == NULL)
			goto err;
		if (!isdec(p))
			goto err;
		if ((p = strtokm(NULL, "$")) == NULL)
			goto err;
	}
	if (!isdec(p))
		goto err;
	len = atoi(p);
	if(len > BIG_ENOUGH || !len)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)
		goto err;
	if (hexlenl(p) != len * 2)
		goto err;
	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

/* Parse the ciphertext into a custom_salt (iteration count + raw data bytes).
 * Legacy (non-v2) wallets use 10 PBKDF2 iterations. Returns a pointer to a
 * static buffer, as the JtR salt API expects. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static union {
		struct custom_salt _cs;
		ARCH_WORD_32 dummy;
	} un;
	struct custom_salt *cs = &(un._cs);

	memset(&un, 0, sizeof(un));
	ctcopy += TAG_LENGTH;
	p = strtokm(ctcopy, "$");
	if (!strcmp(p, "v2")) {
		p = strtokm(NULL, "$");
		cs->iter = atoi(p);
		p = strtokm(NULL, "$");
	} else
		cs->iter = 10;
	cs->length = atoi(p);
	p = strtokm(NULL, "$");
	for (i = 0; i < cs->length; i++)
		cs->data[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)cs;
}

/* Install a salt: the first 16 bytes of wallet data are the PBKDF2 salt
 * (they double as the AES IV in blockchain_decrypt). Pushes the salt
 * structure to the device. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	memcpy((char*)currentsalt.salt, cur_salt->data, 16);
	currentsalt.length = 16;
	currentsalt.iterations = cur_salt->iter;
	currentsalt.outlen = 32;

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
		CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
	    "Copy salt to gpu");
}

#undef set_key
/* Store a candidate password (truncated to PLAINTEXT_LENGTH) for the GPU. */
static void set_key(char *key, int index)
{
	uint8_t length = strlen(key);

	if (length > PLAINTEXT_LENGTH)
		length = PLAINTEXT_LENGTH;
	inbuffer[index].length = length;
	memcpy(inbuffer[index].v, key, length);
}

/* Return the stored candidate at index as a NUL-terminated string. */
static char *get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	uint8_t length = inbuffer[index].length;

	memcpy(ret, inbuffer[index].v, length);
	ret[length] = '\0';
	return ret;
}

/* Try the derived AES-256 key against the wallet blob: decrypt one block,
 * cheap-check for '{' and "guid", then decrypt SAFETY_FACTOR bytes and look
 * for the "sharedKey"/"options" JSON keys. Returns 0 on success, -1 on
 * mismatch. Note the IV is the salt (first 16 bytes of cur_salt->data). */
static int blockchain_decrypt(unsigned char *derived_key, unsigned char *data)
{
	unsigned char out[SAFETY_FACTOR];
	AES_KEY akey;
	unsigned char iv[16];

	memcpy(iv, cur_salt->data, 16);
	if(AES_set_decrypt_key(derived_key, 256, &akey) < 0) {
		fprintf(stderr, "AES_set_decrypt_key failed in crypt!\n");
	}
	AES_cbc_encrypt(data + 16, out, 16, &akey, iv, AES_DECRYPT);
	/* various tests */
	if (out[0] != '{') // fast test
		return -1;

	// "guid" will be found in the first block
	if (memmem(out, 16, "\"guid\"", 6)) {
		memcpy(iv, cur_salt->data, 16); //IV has to be reset.
		AES_cbc_encrypt(data + 16, out, SAFETY_FACTOR, &akey, iv, AES_DECRYPT);
		if (memmem(out, SAFETY_FACTOR, "\"sharedKey\"", 11) &&
		    memmem(out, SAFETY_FACTOR, "\"options\"", 9))
			// Note, we 'could' check that the guid and sharedKey values are
			// 'valid' GUID's, but there really is no point. We already have
			// 2^216 confidence in the simple text strings being found.
			return 0;
	}
	return -1;
}

/* Main crack loop: upload keys, run PBKDF2 on the GPU, read back the derived
 * keys, then verify each on the CPU (OpenMP-parallel) with blockchain_decrypt. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	/// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
	    "Copy data to gpu");

	/// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, lws, 0, NULL,
		multi_profilingEvent[1]), "Run kernel");

	/// Read the result back (blocking: synchronizes with the kernel)
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
		outsize, outbuffer, 0, NULL, multi_profilingEvent[2]),
	    "Copy result back");

	if (ocl_autotune_running)
		return count;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
		if (!blockchain_decrypt((unsigned char*)outbuffer[index].v,
		                        cur_salt->data)) {
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	return count;
}

/* Any hit in the last crypt_all batch? */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

/* Did candidate at index crack the current salt? */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

/* blockchain_decrypt is already an exact check; nothing more to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* JtR format registration table. */
struct fmt_main fmt_opencl_blockchain = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		blockchain_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
dropout-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file dropout-inl.h * \brief * \author Bing Xu, Da Zheng, Hang Zhang */ #ifndef MXNET_OPERATOR_NN_DROPOUT_INL_H_ #define MXNET_OPERATOR_NN_DROPOUT_INL_H_ #include <dmlc/logging.h> #include <dmlc/parameter.h> #include <mxnet/operator.h> #include <map> #include <vector> #include <string> #include <utility> #include <algorithm> #include "../mxnet_op.h" #include "../mshadow_op.h" #include "../random/sampler.h" #include "../tensor/elemwise_binary_broadcast_op.h" #define MXNET_USE_MKL_DROPOUT defined(USE_MKL) && defined(_OPENMP) && !defined(__CUDACC__) #if MXNET_USE_MKL_DROPOUT #include <omp.h> #include <mkl_vml_functions.h> #include <mkl_vsl.h> #endif // MXNET_USE_MKL_DROPOUT #define MXNET_USE_CUDNN_DROPOUT MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 7 namespace dropout { enum DropoutOpInputs {kData}; enum DropoutOpOutputs {kOut, kMask}; enum DropoutOpForwardResource {kRandom}; enum DropoutOpMode {kTraining, kAlways}; } // namespace dropout namespace mxnet { namespace op { const int MAX_DIM = 5; struct DropoutParam : public dmlc::Parameter<DropoutParam> { float p; int mode; TShape axes; dmlc::optional<bool> cudnn_off; DMLC_DECLARE_PARAMETER(DropoutParam) { 
DMLC_DECLARE_FIELD(p).set_default(0.5) .set_range(0, 1) .describe("Fraction of the input that gets dropped out during training time."); DMLC_DECLARE_FIELD(mode) .add_enum("training", dropout::kTraining) .add_enum("always", dropout::kAlways) .set_default(dropout::kTraining) .describe("Whether to only turn on dropout during training or to also turn on for inference."); DMLC_DECLARE_FIELD(axes).set_default(TShape()) .describe("Axes for variational dropout kernel."); DMLC_DECLARE_FIELD(cudnn_off).set_default(dmlc::optional<bool>(true)) .describe("Whether to turn off cudnn in dropout operator. " "This option is ignored if axes is specified."); } }; // struct DropoutParam template<typename xpu, typename DType> class DropoutOp { #if MXNET_USE_MKL_DROPOUT static void BernoulliGenerate(common::random::RandGenerator<cpu, DType> gen, int n, double p, int* r) { typename RandGenerator<xpu, DType>::Impl genImpl(&gen, 1); const int seed = 17 + abs(genImpl.rand() % 4096); CHECK_GE(seed, 0); const int nthr = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); #pragma omp parallel num_threads(nthr) { const int ithr = omp_get_thread_num(); const int avg_amount = (n + nthr - 1) / nthr; const int my_offset = ithr * avg_amount; const int my_amount = std::min(my_offset + avg_amount, n) - my_offset; if (my_amount > 0) { VSLStreamStatePtr stream; vslNewStream(&stream, VSL_BRNG_MCG31, seed); vslSkipAheadStream(stream, my_offset); viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount, r + my_offset, p); vslDeleteStream(&stream); } } } static inline bool MKLAvailable() { // BernoulliGenerate expects an array int, so for types smaller than int, the mask buffer // will be too small, so we can;t use MKL in those cases return sizeof(DType) >= sizeof(int); } // MKL forward pass inline void MKLForward(const OpContext &ctx, const std::vector<TBlob> &in_data, const std::vector<TBlob> &out_data) { Stream<xpu> *s = ctx.get_stream<xpu>(); RandGenerator<xpu, DType> *pgen = 
ctx.requested[0].get_parallel_random<xpu, DType>(); CHECK_NOTNULL(pgen); Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s); DType *outptr = out.dptr_; DType *dataptr = data.dptr_; auto maskptr = reinterpret_cast<int *>(mask.dptr_); int count = mask.shape_[0] * mask.shape_[1]; BernoulliGenerate(*pgen, count, this->pkeep_, maskptr); const float pk_1 = 1.0f / this->pkeep_; #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = 0; i < count; ++i) { outptr[i] = dataptr[i] * maskptr[i] * pk_1; } } // MKL backward pass inline void MKLBackward(const OpContext &ctx, const std::vector<TBlob> &in_grad, const std::vector<TBlob> &out_data, const std::vector<TBlob> &out_grad) { Stream<xpu> *s = ctx.get_stream<xpu>(); Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s); DType *ingradptr = gdata.dptr_; const DType *outgradptr = grad.dptr_; auto maskptr = reinterpret_cast<int *>(mask.dptr_); int count = mask.shape_[0] * mask.shape_[1]; const float pk_1 = 1.0f / this->pkeep_; #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = 0; i < count; ++i) { ingradptr[i] = outgradptr[i] * maskptr[i] * pk_1; } } #endif // #if MXNET_USE_MKL_DROPOUT public: /*! * \brief Dropout kernel, compute dropout tensor */ struct DropoutKernel { /*! 
* \brief Dropout kernel function * \param id Thread number (0-based representing count) * \param gen Random number generator * \param N Total number of items in the output * \param step Step between items, related to parallelism * \param dropout_out Output dropout values * \param mask_out Output mask (is multiplied to create dropout output, may be 0) * \param input_data Input data to perform the dropout on * \param pkeep Dropout rate (keep when the generated random number is less than this value) */ MSHADOW_XINLINE static void Map(int id, RandGenerator<xpu, DType> gen, const int N, const int step, DType *dropout_out, DType *mask_out, const DType *input_data, const real_t pkeep) { RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, { const real_t rand_num = static_cast<real_t>(genImpl.uniform()); mask_out[i] = mshadow_op::threshold_eq::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep); dropout_out[i] = input_data[i] * mask_out[i]; }); } }; struct BernoulliKernel { /*! \brief Bernoulli kernel for generating mask */ MSHADOW_XINLINE static void Map(int id, RandGenerator<xpu, DType> gen, const int N, const int step, DType *mask_out, const real_t pkeep) { RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, { const real_t rand_num = static_cast<real_t>(genImpl.uniform()); mask_out[i] = mshadow_op::threshold::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep); }); } }; explicit DropoutOp(const DropoutParam &param, Context ctx) { this->pkeep_ = 1.0f - param.p; this->mode_ = static_cast<dropout::DropoutOpMode>(param.mode); this->axes_ = param.axes; this->dropout_passthrough_ = true; #if MXNET_USE_CUDNN_DROPOUT this->cudnn_off_ = param.cudnn_off && param.cudnn_off.value(); this->ctx_ = ctx; if (ctx.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) { dtype_ = mshadow::DataType<DType>::kCudnnFlag; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_)); CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc_)); CUDNN_CALL(cudnnCreateTensorDescriptor(&dx_desc_)); 
CUDNN_CALL(cudnnCreateTensorDescriptor(&dy_desc_)); CUDNN_CALL(cudnnCreateDropoutDescriptor(&dropout_desc_)); } #endif // MXNET_USE_CUDNN_DROPOUT } ~DropoutOp() { #if MXNET_USE_CUDNN_DROPOUT if (this->ctx_.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) { CUDNN_CALL(cudnnDestroyTensorDescriptor(x_desc_)); CUDNN_CALL(cudnnDestroyTensorDescriptor(y_desc_)); CUDNN_CALL(cudnnDestroyTensorDescriptor(dx_desc_)); CUDNN_CALL(cudnnDestroyTensorDescriptor(dy_desc_)); CUDNN_CALL(cudnnDestroyDropoutDescriptor(dropout_desc_)); } #endif // MXNET_USE_CUDNN_DROPOUT } #if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) inline bool CuDNNAvailable() { return this->pkeep_ > 0 && !this->cudnn_off_; } inline void CuDNNForward(const OpContext &ctx, const TBlob &in, const TBlob &mask, const TBlob &out) { Stream<xpu> *s = ctx.get_stream<xpu>(); // set dropout state. ctx.requested[0].get_cudnn_dropout_desc(&dropout_desc_, s, 1.0f - this->pkeep_, seed_); // describe input/output tensor int dim[4], stride[4]; dim[0] = 1; dim[1] = 1; dim[2] = 1; dim[3] = out.Size(); stride[0] = out.Size(); stride[1] = out.Size(); stride[2] = out.Size(); stride[3] = 1; CUDNN_CALL(cudnnSetTensorNdDescriptor(x_desc_, dtype_, 4, dim, stride)); CUDNN_CALL(cudnnSetTensorNdDescriptor(y_desc_, dtype_, 4, dim, stride)); // perform dropout with cudnn CUDNN_CALL(cudnnDropoutGetReserveSpaceSize(x_desc_, &dropout_reserve_byte_)); // cudnn uses bits to record the positions that are dropped, so reserve bytes is always // 1/8 of input size. 
CHECK_GE(mask.Size() * sizeof(DType), dropout_reserve_byte_) << "The size of the mask space is smaller than the required cudnn reserved space."; CUDNN_CALL(cudnnDropoutForward(s->dnn_handle_, dropout_desc_, x_desc_, in.dptr<DType>(), y_desc_, out.dptr<DType>(), mask.dptr<DType>(), dropout_reserve_byte_)); } inline void CuDNNBackward(const OpContext &ctx, const TBlob &out_grad, const TBlob &mask, const TBlob &in_grad) { Stream<xpu> *s = ctx.get_stream<xpu>(); // describe input/output tensor int dim[4], stride[4]; dim[0] = 1; dim[1] = 1; dim[2] = 1; dim[3] = in_grad.Size(); stride[0] = in_grad.Size(); stride[1] = in_grad.Size(); stride[2] = in_grad.Size(); stride[3] = 1; CUDNN_CALL(cudnnSetTensorNdDescriptor(dy_desc_, dtype_, 4, dim, stride)); CUDNN_CALL(cudnnSetTensorNdDescriptor(dx_desc_, dtype_, 4, dim, stride)); // perform dropout with cudnn CUDNN_CALL(cudnnDropoutBackward(s->dnn_handle_, dropout_desc_, dy_desc_, out_grad.dptr<DType>(), dx_desc_, in_grad.dptr<DType>(), mask.dptr<DType>(), dropout_reserve_byte_)); } #endif // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) void Forward(const OpContext &ctx, const std::vector<TBlob> &in_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &out_data) { this->dropout_passthrough_ = true; if (req[dropout::kOut] != kNullOp) { CHECK_EQ(in_data.size(), 1U); if (ctx.is_train) { CHECK_EQ(out_data.size(), 2U); } Stream<xpu> *s = ctx.get_stream<xpu>(); const TBlob &in = in_data[dropout::kData]; const TBlob &out = out_data[dropout::kOut]; const TBlob &mask = out_data[dropout::kMask]; if (this->pkeep_ < 1 && (ctx.is_train || this->mode_ == dropout::kAlways)) { this->dropout_passthrough_ = false; if (this->axes_.ndim() == 0) { #if MXNET_USE_MKL_DROPOUT if (MKLAvailable()) { MKLForward(ctx, in_data, out_data); return; } #endif // MXNET_USE_MKL_DROPOUT #if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) if (CuDNNAvailable()) { CuDNNForward(ctx, in, mask, out); return; } #endif // MXNET_USE_CUDNN_DROPOUT && 
defined(__CUDACC__) RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>(); CHECK_NOTNULL(pgen); CHECK(req[dropout::kOut] != kAddTo); LaunchRNG<DropoutKernel, xpu>(s, pgen, out.Size(), out.dptr<DType>(), mask.dptr<DType>(), in.dptr<DType>(), this->pkeep_); return; } else { RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>(); CHECK_NOTNULL(pgen); // initialize the mask LaunchRNG<BernoulliKernel, xpu>(s, pgen, mask.Size(), mask.dptr<DType>(), this->pkeep_); // broadcast mul TShape new_lshape, new_rshape, new_oshape; int ndim = BinaryBroadcastShapeCompact(in.shape_, mask.shape_, out.shape_, &new_lshape, &new_rshape, &new_oshape); if (!ndim) { MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch( s, out.Size(), out.dptr<DType>(), in.dptr<DType>(), mask.dptr<DType>()); }); } else { BROADCAST_NDIM_SWITCH(ndim, NDim, { mshadow::Shape<NDim> oshape = new_oshape.get<NDim>(); mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>()); mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>()); mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, DType, mshadow_op::mul>, xpu>:: template LaunchEx(s, new_oshape.Size(), req[dropout::kOut], lstride, rstride, oshape, in.dptr<DType>(), mask.dptr<DType>(), out.dptr<DType>()); }); } } } else { MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch( s, out.Size(), out.dptr<DType>(), in.dptr<DType>()); }); } } } void Backward(const OpContext &ctx, const std::vector<TBlob> &out_grad, const std::vector<TBlob> &out_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &in_grad) { using namespace mshadow; using namespace mshadow::expr; Stream<xpu> *s = ctx.get_stream<xpu>(); if (!this->dropout_passthrough_) { this->dropout_passthrough_ = true; const TBlob &gdata = 
in_grad[dropout::kData]; const TBlob &grad = out_grad[dropout::kOut]; const TBlob &mask = out_data[dropout::kMask]; if (this->axes_.ndim() == 0) { #if MXNET_USE_MKL_DROPOUT if (MKLAvailable()) { MKLBackward(ctx, in_grad, out_data, out_grad); return; } #endif // MXNET_USE_MKL_DROPOUT #if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) if (CuDNNAvailable()) { CuDNNBackward(ctx, grad, mask, gdata); return; } #endif // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) // standard case for dropout CHECK_EQ(grad.Size(), mask.Size()); MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>()); }); return; } else { // broardcast mul TShape new_lshape, new_rshape, new_oshape; int ndim = BinaryBroadcastShapeCompact(grad.shape_, mask.shape_, gdata.shape_, &new_lshape, &new_rshape, &new_oshape); if (!ndim) { MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>()); }); } else { BROADCAST_NDIM_SWITCH(ndim, NDim, { mshadow::Shape<NDim> oshape = new_oshape.get<NDim>(); mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>()); mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>()); mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, DType, mshadow_op::mul>, xpu>:: template LaunchEx(s, new_oshape.Size(), req[0], lstride, rstride, oshape, grad.dptr<DType>(), mask.dptr<DType>(), gdata.dptr<DType>()); }); } } } else { const TBlob& gdata = in_grad[dropout::kData]; const TBlob& grad = out_grad[dropout::kOut]; MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>()); }); } } private: /*! 
\brief Dropout rate (keep when the generated random number is less than this value) */ real_t pkeep_; /*! \brief Dropout mode */ dropout::DropoutOpMode mode_; /*! \brief Axes on which dropout mask is shared in the form of broadcast multiply */ TShape axes_; /*! \brief Flag to record whether forward is executed in pass-through mode */ bool dropout_passthrough_; #if MXNET_USE_CUDNN_DROPOUT bool cudnn_off_; Context ctx_; cudnnDataType_t dtype_; cudnnDropoutDescriptor_t dropout_desc_; uint64_t seed_ = 17 + rand() % 4096; // NOLINT(runtime/threadsafe_fn) size_t dropout_reserve_byte_; cudnnTensorDescriptor_t x_desc_, y_desc_, dx_desc_, dy_desc_; #endif // MXNET_USE_CUDNN_DROPOUT }; // class DropoutOp static OpStatePtr CreateDropoutState(const nnvm::NodeAttrs &attrs, const Context ctx, const std::vector<TShape> &in_shapes, const std::vector<int> &in_types) { const DropoutParam& param = nnvm::get<DropoutParam>(attrs.parsed); OpStatePtr state; MSHADOW_REAL_TYPE_SWITCH(in_types[dropout::kData], DType, { if (ctx.dev_type == kGPU) { state = OpStatePtr::Create<DropoutOp<gpu, DType>>(param, ctx); } else { state = OpStatePtr::Create<DropoutOp<cpu, DType>>(param, ctx); } return state; }); LOG(FATAL) << "should never reach here"; return OpStatePtr(); // should never reach here } template<typename xpu> void DropoutCompute(const OpStatePtr& state, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>(); op.Forward(ctx, inputs, req, outputs); }); } template<typename xpu> void DropoutGradCompute(const OpStatePtr& state, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1); CHECK_EQ(req.size(), 1); std::vector<TBlob> out_grads(2); std::vector<TBlob> out_data(2); 
out_grads[dropout::kOut] = inputs[0]; out_data[dropout::kMask] = inputs[1]; MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>(); op.Backward(ctx, out_grads, out_data, req, outputs); }); } } // namespace op } // namespace mxnet #undef MXNET_USE_MKL_DROPOUT #endif // MXNET_OPERATOR_NN_DROPOUT_INL_H_
integrator_leapfrog.c
/**
 * @file    integrator_leapfrog.c
 * @brief   Leap-frog integration scheme.
 * @author  Hanno Rein <hanno@hanno-rein.de>
 * @details This file implements the leap-frog integration scheme.
 * This scheme is second order accurate, symplectic and well suited for
 * non-rotating coordinate systems. Note that the scheme is formally only
 * first order accurate when velocity dependent forces are present.
 * 
 * @section LICENSE
 * Copyright (c) 2011 Hanno Rein, Shangfei Liu
 *
 * This file is part of rebound.
 *
 * rebound is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * rebound is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with rebound.  If not, see <http://www.gnu.org/licenses/>.
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <time.h>
#include "rebound.h"

// Leapfrog integrator (Drift-Kick-Drift)
// for non-rotating frame.
void reb_integrator_leapfrog_part1(struct reb_simulation* r){ r->gravity_ignore_terms = 0; const int N = r->N; struct reb_particle* restrict const particles = r->particles; const double dt = r->dt; #pragma omp parallel for schedule(guided) for (int i=0;i<N;i++){ particles[i].x += 0.5* dt * particles[i].vx; particles[i].y += 0.5* dt * particles[i].vy; particles[i].z += 0.5* dt * particles[i].vz; } r->t+=dt/2.; } void reb_integrator_leapfrog_part2(struct reb_simulation* r){ const int N = r->N; struct reb_particle* restrict const particles = r->particles; const double dt = r->dt; #pragma omp parallel for schedule(guided) for (int i=0;i<N;i++){ particles[i].vx += dt * particles[i].ax; particles[i].vy += dt * particles[i].ay; particles[i].vz += dt * particles[i].az; particles[i].x += 0.5* dt * particles[i].vx; particles[i].y += 0.5* dt * particles[i].vy; particles[i].z += 0.5* dt * particles[i].vz; } r->t+=dt/2.; r->dt_last_done = r->dt; } void reb_integrator_leapfrog_synchronize(struct reb_simulation* r){ // Do nothing. } void reb_integrator_leapfrog_reset(struct reb_simulation* r){ // Do nothing. }
New_grid.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include <math.h> #include <limits.h> #include "grb2.h" #include "wgrib2.h" #include "fnlist.h" /* * New_grid * * interpolation using ipolates library * input = winds are N/S or grid * output = winds are N/S or grid depending on wind_rotation * * to add new grids * * input grids: add to mk_kdgs.c * output grids: add to sec3_grids * add code to mode == -1 to parse grid specifications * * to add types to vector fields definition * modify source code: vectors[] * * 6/2010: Public Domain Wesley Ebisuzaki * 5/2018: USE_IPOLATES == 0 disable * USE_IPOLATES == 1 grib1 version of ipolates * USE_IPOLATES == 3 grib2 version of ipolates (double) * in ip2 code, use dlon = mod( (angle + 3600 - 1), 360) + 1 * this makes dlon only xxx.xx precision whic is less * than grib1 (xxx.xxx) and grib2 (xxx.xxxxxx) expectations * should not use float version of ip2. */ const char **vectors; const char *default_vectors[] = {"UGRD", "VGRD", "VUCSH", "VVCSH","UFLX", "VFLX", "UGUST","VGUST","USTM","VSTM","VDFUA", "VDFVA", "MAXUW", "MAXVW", "UOGRD","VOGRD", "UICE", "VICE", "U-GWD", "V-GWD", "USSD", "VSSD", NULL }; /* new_grid_vectors can be called submsg_uv, should not require ipolates to be installed */ static const char *no_vectors[] = { NULL }; static const char *UV_vectors[] = { "UGRD", "VGRD", NULL }; /* * HEADER:111:new_grid_vectors:misc:1:change fields to vector interpolate: X=none,default,UGRD:VGRD,(U:V list) */ int f_new_grid_vectors(ARG1) { int i, n; const char *from; char *to; struct local_struct { char *buff; const char **uv_vectors; }; struct local_struct *save; if (mode == -1) { *local = save = (struct local_struct *) malloc( sizeof(struct local_struct)); if (save == NULL) fatal_error("new_grid_vectors: memory allocation",""); save->buff = NULL; save->uv_vectors = NULL; if (strcmp(arg1,"none") == 0) { save->uv_vectors = vectors = (const char **) no_vectors; return 0; } if (strcmp(arg1,"default") == 0) { 
save->uv_vectors = vectors = (const char **) default_vectors; return 0; } if (strcmp(arg1,"UGRD:VGRD") == 0) { save->uv_vectors = (const char **) UV_vectors; return 0; } from = arg1; n = 0; while (*from) { if (*from++ == ':') n++; } if (n % 2 == 0) fatal_error("new_grid_vectors: bad definition: %s", arg1); i = strlen(arg1); save->buff = (char *) malloc(i + 1); if (save->buff == NULL) fatal_error("new_grid_vectors: memory allocation",""); save->uv_vectors = (const char **) malloc((n+2) * sizeof(char *)); if (save->uv_vectors == NULL) fatal_error("new_grid_vectors: memory allocation",""); from = arg1; to = save->buff; for (i = 0; i <= n; i++) { save->uv_vectors[i] = to; while (*from != '\0' && *from != ':') { *to++ = *from++; } if (*from == ':') from++; *to++ = '\0'; } save->uv_vectors[n+1] = NULL; return 0; } save = *local; if (mode == -2) { if (save->buff != NULL) { free(save->buff); free(save->uv_vectors); } free(save); return 0; } vectors = save->uv_vectors; return 0; } #if USE_IPOLATES == 1 /* This is the grib1 version which is now frozen. 
* The grib2 version is similar but * (1) uses the grib2 version of ipolates * (2) double precision * (3) spectral support was removed */ #ifdef G95 #define IPOLATES ipolates_ #define IPOLATEV ipolatev_ void g95_runtime_start(int ,char **); void g95_runtime_stop(void); static int g95_runstop = 0; #endif #ifdef GFORTRAN #define IPOLATES ipolates_ #define IPOLATEV ipolatev_ #endif #ifdef OPENF95 #define IPOLATES ipolates_ #define IPOLATEV ipolatev_ #endif #ifdef IFORT #define IPOLATES ipolates_ #define IPOLATEV ipolatev_ #endif #ifdef XLF #define IPOLATES ipolates #define IPOLATEV ipolatev #endif #ifdef CRAYCE #define IPOLATES ipolates_ #define IPOLATEV ipolatev_ #endif #ifdef SOLARIS #define IPOLATES ipolates_ #define IPOLATEV ipolatev_ #endif void IPOLATES(int *interpol, int *ipopt, int *kgds, int *kgds_out, int *npnts, int *n_out0, int *km, int *ibi, unsigned char *bitmap, float *data_in, int *n_out, float *rlat, float *rlon, int *ibo, unsigned char *bitmap_out, float *data_out, int *iret); void IPOLATEV(int *interpol, int *ipopt, int *kgds, int *kgds_out, int *npnts, int *n_out0, int *km, int *ibi, unsigned char *bitmap, float *u_in, float *v_in, int *n_out, float *rlat, float *rlon, float *crot, float *srot, int *ibo, unsigned char *bitmap_out, float *u_out, float *v_out, int *iret); extern unsigned int npnts,nx,ny; extern double *lat, *lon; extern int decode, latlon, file_append, flush_mode; extern int use_scale, dec_scale, bin_scale, wanted_bits, max_bits; extern enum output_grib_type grib_type; extern enum output_order_type output_order; extern int save_translation; extern enum output_order_type output_order_wanted, output_order; static int interpol_type = 0; static int ipopt[20] = {-1,-1,0, 0,0,0, 0,0,0, 0}; /* * HEADER:111:new_grid_interpolation:misc:1:new_grid interpolation X=bilinear,bicubic,neighbor,budget */ int f_new_grid_interpolation(ARG1) { if (strcmp(arg1,"bilinear") == 0) { interpol_type = 0; ipopt[0] = -1; } else if (strcmp(arg1,"bicubic") == 0) { 
interpol_type = 1; ipopt[0] = 0; } else if (strcmp(arg1,"neighbor") == 0) { interpol_type = 2; ipopt[0] = 1; } else if (strcmp(arg1,"budget") == 0) { interpol_type = 3; ipopt[0] = -1; } // turned off spectral -- new library rarely used interpolation option // else if (strcmp(arg1,"spectral") == 0) { interpol_type = 4; ipopt[0] = 0; ipopt[1] = -1; } // turned off neighbor-budget - save space for rarely used interpolation option // else if (strcmp(arg1,"neighbor-budget") == 0) { interpol_type = 6; ipopt[0] = -1; } else fatal_error("new_grid_interpolation: unknown type %s", arg1); return 0; } /* * HEADER:111:new_grid_ipopt:misc:1:new_grid ipopt values X=i1:i2..:iN N <= 20 */ int f_new_grid_ipopt(ARG1) { int i, k, val, m; i = 0; k = sscanf(arg1, "%d%n", &val, &m); while (k == 1) { if (i > 19) fatal_error("new_grid_ipopt: too many ipopt values, 20 max",""); ipopt[i++] = val; arg1 += m; k = sscanf(arg1, ":%d%n", &val, &m); } return 0; } /* * HEADER:111:new_grid_winds:misc:1:new_grid wind orientation: X = grid, earth (no default) */ static enum {grid, earth, undefined} wind_rotation = undefined; int f_new_grid_winds(ARG1) { int *save; if (mode == -2) { free(*local); return 0; } if (mode == -1) { if ((*local = save = (int *) malloc(sizeof(int))) == NULL) fatal_error("new_grid_winds: malloc",""); if (strcmp(arg1,"grid") == 0) *save = 0; else if (strcmp(arg1,"earth") == 0) *save = 1; else fatal_error("new_grid_winds: bad arg %s", arg1); } save = (int *) *local; wind_rotation = (*save) ? earth : grid; return 0; } struct local_struct { // U data float *u_val; int has_u, nx, ny; unsigned char *clone_sec[9]; char name[NAMELEN]; // interpolation int npnts_out; // must be integer .. 
fortran call requires int float *rlat, *rlon, *crot, *srot; unsigned char *sec3; int kgds_out[200]; double radius_major, radius_minor; // output file struct seq_file out; }; unsigned char blank_sec1[21] = { 0,0,0,21,1, 255,255,255,255, // center subcenter 2,1,255, // grib master table, local table, sig ref time 255, 255, // year 255, 255, 255, 255, 255, // month .. second 255, 255}; /* * HEADER:111:new_grid:output:4:bilinear interpolate: X=projection Y=x0:nx:dx Z=y0:ny:dy A=grib_file alpha */ int f_new_grid(ARG4) { struct local_struct *save; unsigned int i; int is_u, is_v, ftn_npnts, ftn_nout; int kgds[200], km; float *data_in, *data_out; double x0, y0, dx, dy, xn, yn; double lov, lad, latin1, latin2; int proj; // projection: for LC 0 = NP, 128 = SP char name[NAMELEN]; int j, ibi, ibo, iret, nnx, nny, n_out; unsigned char *new_sec[8], *s, *bitmap, *bitmap_out, *p; /* for lambertc */ double r_maj, r_min, ref_lon, ref_lat; if (mode == -1) { // initialization decode = 1; output_order_wanted = raw; // in raw order #ifdef G95 // initialize g95 runtime library if (g95_runstop == 0) { g95_runtime_start(0,NULL); g95_runstop = 1; } #endif // if ( (sizeof(vectors) / sizeof (vectors[0])) % 2 == 1) fatal_error("new_grid: program error in vectors[]",""); // allocate static variables *local = save = (struct local_struct *) malloc( sizeof(struct local_struct)); if (save == NULL) fatal_error("new_grid: memory allocation error",""); if (fopen_file(&(save->out), arg4, file_append ? 
"ab" : "wb") != 0) { fatal_error("-new_grid: could not open file %s", arg4); } save->has_u = 0; save->radius_major = save->radius_minor = 0.0; init_sec(save->clone_sec); s = NULL; // parse NCEP grids */ ncep_grids(&arg1, &arg2, &arg3); // for each output grid if (strcmp(arg1,"latlon") == 0) { if (sscanf(arg2,"%lf:%d:%lf", &x0, &nnx, &dx) != 3) fatal_error("new_grid: XDEF wrong:%s",arg2); if (sscanf(arg3,"%lf:%d:%lf", &y0, &nny, &dy) != 3) fatal_error("new_grid: YDEF wrong:%s",arg3); if (x0 < 0.0) x0 += 360.0; save->nx = nnx; save->ny = nny; save->npnts_out = n_out = nnx*nny; if (n_out <= 0) fatal_error("new_grid: bad nx, ny",""); // make a new section 3 s = sec3_lola(nnx, x0, dx, nny, y0, dy, sec); } else if (strcmp(arg1,"rot-latlon") == 0) { if (sscanf(arg2,"%lf:%d:%lf", &x0, &nnx, &dx) != 3) fatal_error("new_grid: XDEF wrong:%s",arg2); if (sscanf(arg3,"%lf:%d:%lf", &y0, &nny, &dy) != 3) fatal_error("new_grid: YDEF wrong:%s",arg3); if (x0 < 0.0) x0 += 360.0; save->nx = nnx; save->ny = nny; save->npnts_out = n_out = nnx*nny; if (n_out <= 0) fatal_error("new_grid: bad nx, ny",""); // make a new section 3 s = sec3_lola(nnx, x0, dx, nny, y0, dy, sec); } else if (strncmp(arg1,"mercator:",9) == 0) { if (sscanf(arg1,"mercator:%lf", &lad) != 1) fatal_error("new_grid: LaD (latitude interesection) not specified",""); if (sscanf(arg2,"%lf:%d:%lf:%lf", &x0, &nnx, &dx, &xn) != 4) fatal_error("new_grid: XDEF wrong:%s",arg2); if (sscanf(arg3,"%lf:%d:%lf:%lf", &y0, &nny, &dy, &yn) != 4) if (x0 < 0.0) x0 += 360.0; save->nx = nnx; save->ny = nny; save->npnts_out = n_out = nnx*nny; if (n_out <= 0) fatal_error("new_grid: bad nx, ny",""); // make a new section 3 s = sec3_mercator(lad, nnx, x0, dx, xn, nny, y0, dy, yn, sec); } else if (strcmp(arg1,"gaussian") == 0) { if (sscanf(arg2,"%lf:%d:%lf", &x0, &nnx, &dx) != 3) fatal_error("new_grid: XDEF wrong:%s",arg2); if (sscanf(arg3,"%lf:%d", &y0, &nny) != 2) fatal_error("new_grid: YDEF wrong:%s",arg3); if (x0 < 0.0) x0 += 360.0; save->nx = 
nnx; save->ny = nny; save->npnts_out = n_out = nnx*nny; if (n_out <= 0) fatal_error("new_grid: bad nx, ny",""); // make a new section 3 s = sec3_gaussian(nnx, x0, dx, nny, y0, sec); } else if (strncmp(arg1,"lambert:",8) == 0) { i = sscanf(arg1,"lambert:%lf:%lf:%lf:%lf", &lov, &latin1, &latin2, &lad); if (i < 2) fatal_error("new_grid: arg1 wrong:%s",arg1); if (lov < 0.0) lov += 360.0; if (i < 3) latin2 = latin1; if (i < 4) lad = latin2; proj = 0; if (latin2 < 0.0) proj = 128; if (sscanf(arg2,"%lf:%d:%lf", &x0, &nnx, &dx) != 3) fatal_error("new_grid: XDEF wrong:%s",arg2); if (sscanf(arg3,"%lf:%d:%lf", &y0, &nny, &dy) != 3) fatal_error("new_grid: YDEF wrong:%s",arg3); if (x0 < 0.0) x0 += 360.0; save->nx = nnx; save->ny = nny; save->npnts_out = n_out = nnx*nny; if (n_out <= 0) fatal_error("new_grid: bad nx, ny",""); // make a new section 3 s = sec3_lc(lov, lad, latin1, latin2, proj, nnx, x0, dx, nny, y0, dy, sec); } /* for lambertc, input is the lon-lat of center point */ /* can not calc grid until radius is given, so do lambert code to check args */ else if (strncmp(arg1,"lambertc:",9) == 0) { i = sscanf(arg1,"lambertc:%lf:%lf:%lf:%lf", &lov, &latin1, &latin2, &lad); if (i < 2) fatal_error("new_grid: arg1 wrong:%s",arg1); if (lov < 0.0) lov += 360.0; if (i < 3) latin2 = latin1; if (i < 4) lad = latin2; proj = 0; if (latin2 < 0.0) proj = 128; if (sscanf(arg2,"%lf:%d:%lf", &x0, &nnx, &dx) != 3) fatal_error("new_grid: XDEF wrong:%s",arg2); if (sscanf(arg3,"%lf:%d:%lf", &y0, &nny, &dy) != 3) fatal_error("new_grid: YDEF wrong:%s",arg3); if (x0 < 0.0) x0 += 360.0; save->nx = nnx; save->ny = nny; save->npnts_out = n_out = nnx*nny; if (n_out <= 0) fatal_error("new_grid: bad nx, ny",""); // make a new section 3 s = sec3_lc(lov, lad, latin1, latin2, proj, nnx, x0, dx, nny, y0, dy, sec); } else if (strncmp(arg1,"nps:",4) == 0 || strncmp(arg1,"sps:",4) == 0) { if (sscanf(arg1,"%*[ns]ps:%lf:%lf", &lov, &lad) != 2) fatal_error("new_grid: arg1 wrong:%s",arg1); if (lad != 60.0) 
fatal_error("New_grid: only LatD = 60 is supported",""); proj = 0; if (arg1[0] == 's') proj = 128; if (sscanf(arg2,"%lf:%d:%lf", &x0, &nnx, &dx) != 3) fatal_error("new_grid: XDEF wrong:%s",arg2); if (sscanf(arg3,"%lf:%d:%lf", &y0, &nny, &dy) != 3) fatal_error("new_grid: YDEF wrong:%s",arg3); if (lov < 0.0) lov += 360.0; if (x0 < 0.0) x0 += 360.0; save->nx = nnx; save->ny = nny; save->npnts_out = n_out = nnx*nny; if (n_out <= 0) fatal_error("new_grid: bad nx, ny",""); // make a new section 3 s = sec3_polar_stereo(lov, lad, proj, nnx, x0, dx, nny, y0, dy, sec); } else fatal_error("new_grid: unsupported output grid %s", arg1); new_sec[1] = blank_sec1; // add center info // save new section 3 i = (int) uint4(s); // size of section 3 new_sec[3] = save->sec3 = (unsigned char *) malloc(i * sizeof(unsigned char)); for (j = 0; j < i; j++) save->sec3[j] = s[j]; // apply wind rotation .. change flag 3.3 if (wind_rotation == undefined) { fprintf(stderr,"Warning: -new_grid wind orientation undefined, " "use \"-new_grid_winds (grid|earth)\", earth used (N=North Pole)\n"); } if ((p = flag_table_3_3_location(new_sec)) != NULL) { if (wind_rotation == grid) *p = *p | 8; else *p = *p & (255 - 8); } if (mk_kgds(new_sec, save->kgds_out)) fatal_error("new_grid: encoding output kgds",""); /* some vectors need by interpolation routines */ if ((save->rlat = (float *) malloc( sizeof(float) * (size_t) n_out)) == NULL) fatal_error("new_grid memory allocation",""); if ((save->rlon = (float *) malloc(sizeof(float) * (size_t) n_out)) == NULL) fatal_error("new_grid memory allocation",""); if ((save->crot = (float *) malloc(sizeof(float) * (size_t) n_out)) == NULL) fatal_error("new_grid memory allocation",""); if ((save->srot = (float *) malloc(sizeof(float) * (size_t) n_out)) == NULL) fatal_error("new_grid memory allocation",""); return 0; } save = (struct local_struct *) *local; if (mode == -2) { // cleanup #ifdef G95 if (g95_runstop == 1) { g95_runtime_stop(); g95_runstop = 0; } #endif if 
(save->has_u > 0) { fprintf(stderr,"-new_grid: last field %s was not interpolated (missing V)\n", save->name); free(save->u_val); free_sec(save->clone_sec); } free(save->rlon); free(save->rlat); free(save->crot); free(save->srot); free(save->sec3); fclose_file(&(save->out)); free(save); return 0; } if (mode >= 0) { // processing /* The kgds of some output grids will change depending on input grid */ /* for example, radius of earth is not known grib file is read, */ /* and mass vs wind fields */ /* right nowm, only affects lambertc */ if (strncmp(arg1,"lambertc:",8) == 0) { // lambertc depends on the radius of the earth which is // set by the input grib file /* read earth radius */ i = axes_earth(sec, &r_maj, &r_min); if (i) fatal_error_i("axes_earth: error code %d", i); if (save->radius_major != r_maj || save->radius_minor != r_min) { // update sec3 and kgds i = sscanf(arg1,"lambertc:%lf:%lf:%lf:%lf", &lov, &latin1, &latin2, &lad); if (i < 2) fatal_error("new_grid: arg1 wrong:%s",arg1); if (lov < 0.0) lov += 360.0; if (i < 3) latin2 = latin1; if (i < 4) lad = latin2; proj = 0; if (latin2 < 0.0) proj = 128; if (sscanf(arg2,"%lf:%d:%lf", &x0, &nnx, &dx) != 3) fatal_error("new_grid: XDEF wrong:%s",arg2); if (sscanf(arg3,"%lf:%d:%lf", &y0, &nny, &dy) != 3) fatal_error("new_grid: YDEF wrong:%s",arg3); if (x0 < 0.0) x0 += 360.0; save->nx = nnx; save->ny = nny; save->npnts_out = n_out = nnx*nny; if (n_out <= 0) fatal_error("new_grid: bad nx, ny",""); ref_lon = x0; ref_lat = y0; i = new_grid_lambertc(nnx, nny, ref_lon, ref_lat, latin1, latin2, lov, lad, r_maj, r_min, dx, dy, &x0, &y0); if (i) fatal_error_i("new_grid_lambertc: error code %d", i); // make a new section 3 s = sec3_lc(lov, lad, latin1, latin2, proj, nnx, x0, dx, nny, y0, dy, sec); // save new section 3 i = (int) uint4(s); // size of section 3 for (j = 0; j < i; j++) save->sec3[j] = s[j]; // make kgds new_sec[1] = blank_sec1; // add center info new_sec[3] = save->sec3; if (mk_kgds(new_sec, save->kgds_out)) 
fatal_error("new_grid: encoding output kgds",""); // save radius of earth, to show sec3 and kgds has been done save->radius_major = r_maj; save->radius_minor = r_min; } } if (output_order != raw) fatal_error("new_grid: must be in raw output order",""); i = getName(sec, mode, NULL, name, NULL, NULL); is_u = is_v = 0; // for (j = 0 ; j < sizeof(vectors) / sizeof(vectors[0]); j++) { for (j = 0; vectors[j] != NULL; j++) { if (strcmp(name,vectors[j]) == 0) { if (j % 2 == 0) is_u = 1; else is_v = 1; break; } } // fprintf(stderr, " %s isu %d isv %d has_u %d\n", name, is_u, is_v, save->has_u); // for (i = 0; i < 12; i++) { printf("kgds_out[%d] = %d ",i,save->kgds_out[i]); } // check if V matches expectation if (is_v && (save->has_u == 0 || (same_sec0(sec,save->clone_sec) != 1 || same_sec1(sec,save->clone_sec) != 1 || same_sec3(sec,save->clone_sec) != 1 || same_sec4(sec,save->clone_sec) != 1) )) { fprintf(stderr,"-new_grid: %s doesn't pair with previous vector field, field ignored\n", name); return 0; } // if U field - save if (is_u) { if (save->has_u > 0) { fprintf(stderr,"-new_grid: missing V, %s not interpolated\n",save->name); free(save->u_val); free_sec(save->clone_sec); } copy_sec(sec, save->clone_sec); copy_data(data,ndata,&(save->u_val)); GB2_ParmNum(save->clone_sec) = GB2_ParmNum(sec) + 1; save->has_u = 1; strncpy(save->name, name,NAMELEN-1); save->name[NAMELEN-2]=0; return 0; } // at this point will call polates with either a scalar or vector n_out = save->npnts_out; nnx = save->nx; nny = save->ny; km = 1; // only one field if (mk_kgds(sec, kgds)) fatal_error("new_grid: encoding input kgds",""); data_in = (float *) malloc((1 + (is_v != 0)) * sizeof(float) * (size_t) npnts); bitmap = (unsigned char *) malloc(sizeof(unsigned char) * (size_t) npnts); bitmap_out = (unsigned char *) malloc(sizeof(unsigned char) * (size_t) n_out); data_out = (float *) malloc((1 + (is_v != 0)) * sizeof(float) * (size_t) n_out); if (data_in == NULL || data_out == NULL || bitmap == NULL || 
bitmap_out == NULL) fatal_error("new_grid: memory allocation problem",""); if (is_v) { #pragma omp parallel for private(i) for (i = 0; i < npnts; i++) { if (DEFINED_VAL(data[i]) && DEFINED_VAL(save->u_val[i])) { data_in[i] = save->u_val[i]; data_in[i+npnts] = data[i]; bitmap[i] = 1; } else { data_in[i] = data_in[i + npnts] = 0.0; bitmap[i] = 0; } } if (mode == 98) fprintf(stderr," UV interpolation %s , %s\n", save->name, name); } else { #pragma omp parallel for private(i) for (i = 0; i < npnts; i++) { if (DEFINED_VAL(data[i])) { data_in[i] = data[i]; bitmap[i] = 1; } else { data_in[i] = 0.0; bitmap[i] = 0; } } } // check if bitmap is used ibi = 0; // input bitmap is not used for (i = 0; i < npnts; i++) { if (bitmap[i] == 0) { ibi = 1; break; } } // interpolate // for (i = 0; i < 12; i++) { printf("\nkgds_in[%d] = %d out=%d ",i,kgds[i],save->kgds_out[i]); } ftn_npnts = (int) npnts; ftn_nout = (int) n_out; if (is_v) { IPOLATEV(&interpol_type, ipopt,kgds,save->kgds_out, &ftn_npnts, &n_out, &km, &ibi, bitmap, data_in, data_in+npnts, &ftn_nout,save->rlat,save->rlon, save->crot, save->srot, &ibo, bitmap_out, data_out, data_out + n_out, &iret); } else { IPOLATES(&interpol_type, ipopt,kgds,save->kgds_out, &ftn_npnts, &n_out, &km, &ibi, bitmap, data_in, &ftn_nout, save->rlat,save->rlon, &ibo, bitmap_out, data_out, &iret); } if (iret != 0) { for (i = 0; i < 12; i++) { fprintf(stderr," IPOLATES error: kgds[%d] input %d output %d\n", i+1,kgds[i],save->kgds_out[i]); } if (iret == 2) fatal_error("IPOLATES failed, unrecognized input grid or no grid point of output grid is in input grid",""); if (iret == 3) fatal_error("IPOLATES failed, unrecognized output grid",""); fatal_error_i("IPOLATES failed, error %d",iret); } n_out = (unsigned int) ftn_nout; /* use bitmap to set UNDEFINED values */ if (ibo == 1) { // has a bitmap if (is_v) { for (i = 0; i < n_out; i++) { if (bitmap_out[i] == 0) data_out[i] = data_out[i+n_out] = UNDEFINED; } } else { for (i = 0; i < n_out; i++) { if 
(bitmap_out[i] == 0) data_out[i] = UNDEFINED; } } } // now to write out the grib file for (i = 0; i < 8; i++) new_sec[i] = sec[i]; new_sec[3] = save->sec3; if (is_v != 0) { GB2_ParmNum(new_sec) = GB2_ParmNum(new_sec) - 1; grib_wrt(new_sec, data_out, n_out, nnx, nny, use_scale, dec_scale, bin_scale, wanted_bits, max_bits, grib_type, &(save->out)); GB2_ParmNum(new_sec) = GB2_ParmNum(new_sec) + 1; grib_wrt(new_sec, data_out+n_out, n_out, nnx, nny, use_scale, dec_scale, bin_scale, wanted_bits, max_bits, grib_type, &(save->out)); } else { grib_wrt(new_sec, data_out, n_out, nnx, nny, use_scale, dec_scale, bin_scale, wanted_bits, max_bits, grib_type, &(save->out)); } if (flush_mode) fflush_file(&(save->out)); free(data_in); free(bitmap); free(bitmap_out); free(data_out); if (is_v != 0) { save->has_u = 0; free(save->u_val); free_sec(save->clone_sec); } } return 0; } #endif #if USE_IPOLATES == 3 /* * this is the double precision float and 4-byte integer version of grib2 version of ipolates * this version will not handle 2G+ grids (4 byte integers) * some optimizations relative to grib1 version and the single precision grib2 versions * of the wrappers * * spectral restored (removed from grib1 version because I didn't want to support it) */ #ifdef G95 #define IPOLATES ipolates_ #define IPOLATEV ipolatev_ void g95_runtime_start(int ,char **); void g95_runtime_stop(void); static int g95_runstop = 0; #endif #ifdef GFORTRAN #define IPOLATES ipolates_ #define IPOLATEV ipolatev_ #endif #ifdef OPENF95 #define IPOLATES ipolates_ #define IPOLATEV ipolatev_ #endif #ifdef IFORT #define IPOLATES ipolates_ #define IPOLATEV ipolatev_ #endif #ifdef XLF #define IPOLATES ipolates #define IPOLATEV ipolatev #endif #ifdef CRAYCE #define IPOLATES ipolates_ #define IPOLATEV ipolatev_ #endif #ifdef SOLARIS #define IPOLATES ipolates_ #define IPOLATEV ipolatev_ #endif void IPOLATES(int *interpol, int *ipopt, int *gdt_in, int *gdttmpl_in, int *gdttmpl_size_in, int *gdt_out, int *gdttmpl_out, int 
*gdttmpl_size_out, int *mi, int *mo, int *km, int *ibi,
    unsigned char *bitmap, double *data_in, int *n_out, double *rlat, double *rlon,
    int *ibo, unsigned char *bitmap_out, double *data_out, int *iret);

/* Fortran entry point: vector (u,v) interpolation, double precision ip2lib. */
void IPOLATEV(int *interpol, int *ipopt, int *gdt_in, int *gdttmpl_in,
    int *gdttmpl_size_in, int *gdt_out, int *gdttmpl_out, int *gdttmpl_size_out,
    int *mi, int *mo, int *km, int *ibi,
    unsigned char *bitmap, double *u_in, double *v_in, int *n_out,
    double *rlat, double *rlon, double *crot, double *srot,
    int *ibo, unsigned char *bitmap_out, double *u_out, double *v_out, int *iret);

extern int decode, latlon, file_append, flush_mode;
extern int use_scale, dec_scale, bin_scale, wanted_bits, max_bits;
extern enum output_grib_type grib_type;
extern enum output_order_type output_order;
extern int save_translation;
/* NOTE(review): output_order is declared extern twice (here and above);
   legal C, but one declaration could be dropped. */
extern enum output_order_type output_order_wanted, output_order;

/* how winds are oriented in the output grid: relative to the grid axes,
   or to earth (N = north pole); undefined until -new_grid_winds is seen */
static enum {grid, earth, undefined} wind_rotation = undefined;

/*
 * -new_grid_winds (grid|earth): record the wind-orientation choice.
 *
 * mode == -1 : initialization - parse arg1 and save the choice in *local
 * mode == -2 : cleanup - free the saved state
 * otherwise  : processing - refresh wind_rotation from the saved state
 * (mode semantics per the other f_new_grid_* handlers in this file;
 *  ARG1 is the project's standard handler-parameter macro)
 */
int f_new_grid_winds(ARG1) {
    int *save;
    if (mode == -2) { free(*local); return 0; }
    if (mode == -1) {
        if ((*local = save = (int *) malloc(sizeof(int))) == NULL)
            fatal_error("new_grid_winds: malloc","");
        if (strcmp(arg1,"grid") == 0) *save = 0;
        else if (strcmp(arg1,"earth") == 0) *save = 1;
        else fatal_error("new_grid_winds: bad arg %s", arg1);
    }
    save = (int *) *local;
    /* 1 -> earth-relative, 0 -> grid-relative */
    wind_rotation = (*save) ? earth : grid;
    return 0;
}

/* interpolation scheme passed to IPOLATES/IPOLATEV (0=bilinear by default)
   and its option vector; both set by -new_grid_interpolation / -new_grid_ipopt */
static int interpol_type = 0;
static int ipopt[20] = {-1,-1,0, 0,0,0, 0,0,0, 0};

/*
 * -new_grid_interpolation NAME: select the IPOLATES interpolation scheme.
 * Sets interpol_type and the scheme-specific defaults in ipopt[].
 * With USE_SPECTRAL, also accepts spectral-(T|R)NUM (triangular/rhomboidal
 * truncation at wave number NUM > 1).
 */
int f_new_grid_interpolation(ARG1) {
#ifdef USE_SPECTRAL
    char type;
    int max_wave;
#endif
    if (mode >= -1) {
        if (strcmp(arg1,"bilinear") == 0) { interpol_type = 0; ipopt[0] = -1; }
        else if (strcmp(arg1,"bicubic") == 0) { interpol_type = 1; ipopt[0] = 0; }
        else if (strcmp(arg1,"neighbor") == 0) { interpol_type = 2; ipopt[0] = 1; }
        else if (strcmp(arg1,"budget") == 0) { interpol_type = 3; ipopt[0] = -1; }
#ifdef USE_SPECTRAL
        else if (sscanf(arg1,"spectral-%c%d", &type, &max_wave) == 2) {
            if (type == 't' || type == 'T') ipopt[0] = 0;
            else if (type == 'r' || type == 'R') ipopt[0] = 1;
            else fatal_error("new_grid_interpolation: spectral-(T|R)NUM not %s", arg1);
            if (max_wave <= 1) fatal_error("new_grid_interpolation: spectral-(T|R)NUM, NUM > 1", "");
            ipopt[1] = max_wave;
            interpol_type = 4;
        }
#endif
        // turned off neighbor-budget - poorly explained in documentation
        // else if (strcmp(arg1,"neighbor-budget") == 0) { interpol_type = 6; ipopt[0] = -1; }
        else fatal_error("new_grid_interpolation: unknown type %s", arg1);
    }
    return 0;
}

/*
 * -new_grid_ipopt N[:N...]: set raw ipopt[] interpolation options.
 * Parses up to 20 colon-separated integers; overrides the defaults
 * installed by f_new_grid_interpolation.
 */
int f_new_grid_ipopt(ARG1) {
    int i, k, val, m;
    if (mode >= -1) {
        i = 0;
        k = sscanf(arg1, "%d%n", &val, &m);
        while (k == 1) {
            if (i > 19) fatal_error("new_grid_ipopt: too many ipopt values, 20 max","");
            ipopt[i++] = val;
            arg1 += m;    /* advance past the number just consumed */
            k = sscanf(arg1, ":%d%n", &val, &m);
        }
    }
    return 0;
}

/* ipolates is using a fortran library with integers up to 2GB
   need to check that the grid size is < 2GB */
static void check_grid_size(int nx, int ny);

/* abort unless 0 < nx*ny <= INT_MAX (product checked in double to avoid
   int overflow before the comparison) */
static void check_grid_size(int nx, int ny) {
    int err;
    err=0;
    if (nx <= 0) err = 1;
    if (ny <= 0) err = 1;
    if ((double) nx * (double) ny > INT_MAX) err = 1;
    if (err == 1) fatal_error_i("new_grid: grid size exceeds %d (INT_MAX)", INT_MAX);
}

/* per-invocation state for -new_grid (grib2/ip2lib double-precision path) */
struct local_struct {
    // U data (saved until the matching V field arrives)
    float *u_val;
    int has_u, nx, ny;
    unsigned char *clone_sec[9];
    char name[NAMELEN];
    // interpolation
    int npnts_out; // must be integer ..
fortran call requires int double *rlat, *rlon, *crot, *srot, *data_out; float *data_wrt; unsigned char *bitmap_out; unsigned char *sec3; double radius_major, radius_minor; int gdtnum_out, gdt_out[200], gdt_out_size; // output file struct seq_file out; }; unsigned char blank_sec1[21] = { 0,0,0,21,1, 255,255,255,255, // center subcenter 2,1,255, // grib master table, local table, sig ref time 255, 255, // year 255, 255, 255, 255, 255, // month .. second 255, 255}; int f_new_grid(ARG4) { struct local_struct *save; unsigned int i; int is_u, is_v, ftn_npnts, ftn_nout; int km; double *data_in; int gdtnum_in, gdt_in[200], gdt_in_size; double x0, y0, dx, dy, xn, yn; double lov, lad, latin1, latin2; int proj; // projection: for LC 0 = NP, 128 = SP double sp_lat, sp_lon, sp_rot; char name[NAMELEN]; int j, ibi, ibo, iret, nnx, nny, n_out, tmp_interpol_type; unsigned char *new_sec[8], *s, *bitmap, *p; /* for lambertc */ double r_maj, r_min, ref_lon, ref_lat; if (mode == -1) { // initialization decode = 1; output_order_wanted = raw; // in raw order #ifdef G95 // initialize g95 runtime library if (g95_runstop == 0) { g95_runtime_start(0,NULL); g95_runstop = 1; } #endif // allocate static variables *local = save = (struct local_struct *) malloc( sizeof(struct local_struct)); if (save == NULL) fatal_error("new_grid: memory allocation error",""); if (fopen_file(&(save->out), arg4, file_append ? 
"ab" : "wb") != 0) { fatal_error("-new_grid: could not open file %s", arg4); } save->has_u = 0; save->radius_major = save->radius_minor = 0.0; init_sec(save->clone_sec); s = NULL; // parse NCEP grids -- if ncep_grid, replace arg1, arg2, arg3 ncep_grids(&arg1, &arg2, &arg3); // for each output grid if (strcmp(arg1,"latlon") == 0) { if (sscanf(arg2,"%lf:%d:%lf", &x0, &nnx, &dx) != 3) fatal_error("new_grid: XDEF wrong:%s",arg2); if (sscanf(arg3,"%lf:%d:%lf", &y0, &nny, &dy) != 3) fatal_error("new_grid: YDEF wrong:%s",arg3); if (x0 < 0.0) x0 += 360.0; save->nx = nnx; save->ny = nny; check_grid_size(nnx, nny); save->npnts_out = n_out = nnx*nny; // make a new section 3 s = sec3_lola(nnx, x0, dx, nny, y0, dy, sec); } else if (strncmp(arg1,"rot-ll:",7) == 0) { if (sscanf(arg1,"rot-ll:%lf:%lf:%lf", &sp_lon, &sp_lat, &sp_rot) != 3) fatal_error("new_grid: bad %s",arg1); if (sscanf(arg2,"%lf:%d:%lf", &x0, &nnx, &dx) != 3) fatal_error("new_grid: XDEF wrong:%s",arg2); if (sscanf(arg3,"%lf:%d:%lf", &y0, &nny, &dy) != 3) fatal_error("new_grid: YDEF wrong:%s",arg3); if (x0 < 0.0) x0 += 360.0; save->nx = nnx; save->ny = nny; check_grid_size(nnx, nny); save->npnts_out = n_out = nnx*nny; s = sec3_rot_ll(nnx, x0, dx, nny, y0, dy, sp_lon, sp_lat, sp_rot, sec); } else if (strncmp(arg1,"mercator:",9) == 0) { if (sscanf(arg1,"mercator:%lf", &lad) != 1) fatal_error("new_grid: LaD (latitude interesection) not specified",""); if (sscanf(arg2,"%lf:%d:%lf:%lf", &x0, &nnx, &dx, &xn) != 4) fatal_error("new_grid: XDEF wrong:%s",arg2); if (sscanf(arg3,"%lf:%d:%lf:%lf", &y0, &nny, &dy, &yn) != 4) if (x0 < 0.0) x0 += 360.0; save->nx = nnx; save->ny = nny; check_grid_size(nnx, nny); save->npnts_out = n_out = nnx*nny; // make a new section 3 s = sec3_mercator(lad, nnx, x0, dx, xn, nny, y0, dy, yn, sec); } else if (strcmp(arg1,"gaussian") == 0) { if (sscanf(arg2,"%lf:%d:%lf", &x0, &nnx, &dx) != 3) fatal_error("new_grid: XDEF wrong:%s",arg2); if (sscanf(arg3,"%lf:%d", &y0, &nny) != 2) 
fatal_error("new_grid: YDEF wrong:%s",arg3); if (x0 < 0.0) x0 += 360.0; save->nx = nnx; save->ny = nny; check_grid_size(nnx, nny); save->npnts_out = n_out = nnx*nny; // make a new section 3 s = sec3_gaussian(nnx, x0, dx, nny, y0, sec); } else if (strncmp(arg1,"lambert:",8) == 0) { i = sscanf(arg1,"lambert:%lf:%lf:%lf:%lf", &lov, &latin1, &latin2, &lad); if (i < 2) fatal_error("new_grid: arg1 wrong:%s",arg1); if (lov < 0.0) lov += 360.0; if (i < 3) latin2 = latin1; if (i < 4) lad = latin2; proj = 0; if (latin2 < 0.0) proj = 128; if (sscanf(arg2,"%lf:%d:%lf", &x0, &nnx, &dx) != 3) fatal_error("new_grid: XDEF wrong:%s",arg2); if (sscanf(arg3,"%lf:%d:%lf", &y0, &nny, &dy) != 3) fatal_error("new_grid: YDEF wrong:%s",arg3); if (x0 < 0.0) x0 += 360.0; check_grid_size(nnx, nny); save->nx = nnx; save->ny = nny; save->npnts_out = n_out = nnx*nny; // make a new section 3 s = sec3_lc(lov, lad, latin1, latin2, proj, nnx, x0, dx, nny, y0, dy, sec); } /* for lambertc, input is the lon-lat of center point */ /* can not calc grid until radius is given, so do lambert code to check args */ else if (strncmp(arg1,"lambertc:",9) == 0) { i = sscanf(arg1,"lambertc:%lf:%lf:%lf:%lf", &lov, &latin1, &latin2, &lad); if (i < 2) fatal_error("new_grid: arg1 wrong:%s",arg1); if (lov < 0.0) lov += 360.0; if (i < 3) latin2 = latin1; if (i < 4) lad = latin2; proj = 0; if (latin2 < 0.0) proj = 128; if (sscanf(arg2,"%lf:%d:%lf", &x0, &nnx, &dx) != 3) fatal_error("new_grid: XDEF wrong:%s",arg2); if (sscanf(arg3,"%lf:%d:%lf", &y0, &nny, &dy) != 3) fatal_error("new_grid: YDEF wrong:%s",arg3); if (x0 < 0.0) x0 += 360.0; save->nx = nnx; save->ny = nny; check_grid_size(nnx, nny); save->npnts_out = n_out = nnx*nny; // make a new section 3 s = sec3_lc(lov, lad, latin1, latin2, proj, nnx, x0, dx, nny, y0, dy, sec); } else if (strncmp(arg1,"nps:",4) == 0 || strncmp(arg1,"sps:",4) == 0) { if (sscanf(arg1,"%*[ns]ps:%lf:%lf", &lov, &lad) != 2) fatal_error("new_grid: arg1 wrong:%s",arg1); // if (lad != 60.0) 
fatal_error("New_grid: only LatD = 60 is supported",""); proj = 0; if (arg1[0] == 's') proj = 128; if (sscanf(arg2,"%lf:%d:%lf", &x0, &nnx, &dx) != 3) fatal_error("new_grid: XDEF wrong:%s",arg2); if (sscanf(arg3,"%lf:%d:%lf", &y0, &nny, &dy) != 3) fatal_error("new_grid: YDEF wrong:%s",arg3); if (lov < 0.0) lov += 360.0; if (x0 < 0.0) x0 += 360.0; save->nx = nnx; save->ny = nny; check_grid_size(nnx, nny); save->npnts_out = n_out = nnx*nny; // make a new section 3 s = sec3_polar_stereo(lov, lad, proj, nnx, x0, dx, nny, y0, dy, sec); } else fatal_error("new_grid: unsupported output grid %s", arg1); new_sec[1] = blank_sec1; // add center info // save new section 3 i = (int) uint4(s); // size of section 3 new_sec[3] = save->sec3 = (unsigned char *) malloc(i * sizeof(unsigned char)); for (j = 0; j < i; j++) save->sec3[j] = s[j]; // apply wind rotation .. change flag 3.3 if (wind_rotation == undefined) { fprintf(stderr,"Warning: -new_grid wind orientation undefined, " "use \"-new_grid_winds (grid|earth)\", earth used (N=North Pole)\n"); } if ((p = flag_table_3_3_location(new_sec)) != NULL) { if (wind_rotation == grid) *p = *p | 8; else *p = *p & (255 - 8); } save->gdt_out_size = sizeof(save->gdt_out) / sizeof(save->gdt_out[0]); if (mk_gdt(new_sec, &(save->gdtnum_out), &(save->gdt_out[0]), &(save->gdt_out_size) )) fatal_error("new_grid: encoding output gdt",""); /* some vectors need by interpolation routines */ save->rlat = (double *) malloc(sizeof(double) * (size_t) n_out); save->rlon = (double *) malloc(sizeof(double) * (size_t) n_out); save->crot = (double *) malloc(sizeof(double) * (size_t) n_out); save->srot = (double *) malloc(sizeof(double) * (size_t) n_out); save->data_out = (double *) malloc(2 * sizeof(double) * (size_t) n_out); save->data_wrt = (float *) malloc(sizeof(double) * (size_t) n_out); save->bitmap_out = (unsigned char *) malloc(sizeof(char) * (size_t) n_out); if (save->rlat == NULL || save->rlon == NULL || save->crot == NULL || save->srot == NULL || 
save->data_out == NULL || save->data_wrt == NULL || save->bitmap_out == NULL) fatal_error("new_grid memory allocation",""); return 0; } save = (struct local_struct *) *local; if (mode == -2) { // cleanup #ifdef G95 if (g95_runstop == 1) { g95_runtime_stop(); g95_runstop = 0; } #endif if (save->has_u > 0) { fprintf(stderr,"-new_grid: last field %s was not interpolated (missing V)\n", save->name); free(save->u_val); free_sec(save->clone_sec); } free(save->rlon); free(save->rlat); free(save->crot); free(save->srot); free(save->data_out); free(save->data_wrt); free(save->bitmap_out); free(save->sec3); fclose_file(&(save->out)); free(save); return 0; } if (mode >= 0) { /* processing grid */ /* The kgds of some output grids will change depending on input grid */ /* for example, radius of earth is not known until grib file is read, */ /* and mass vs wind fields */ /* right nowm, only affects lambertc */ if (strncmp(arg1,"lambertc:",8) == 0) { // lambertc depends on the radius of the earth which is // set by the input grib file /* read earth radius */ i = axes_earth(sec, &r_maj, &r_min); if (i) fatal_error_i("axes_earth: error code %d", i); if (save->radius_major != r_maj || save->radius_minor != r_min) { // update sec3 and kgds i = sscanf(arg1,"lambertc:%lf:%lf:%lf:%lf", &lov, &latin1, &latin2, &lad); if (i < 2) fatal_error("new_grid: arg1 wrong:%s",arg1); if (lov < 0.0) lov += 360.0; if (i < 3) latin2 = latin1; if (i < 4) lad = latin2; proj = 0; if (latin2 < 0.0) proj = 128; if (sscanf(arg2,"%lf:%d:%lf", &x0, &nnx, &dx) != 3) fatal_error("new_grid: XDEF wrong:%s",arg2); if (sscanf(arg3,"%lf:%d:%lf", &y0, &nny, &dy) != 3) fatal_error("new_grid: YDEF wrong:%s",arg3); if (x0 < 0.0) x0 += 360.0; save->nx = nnx; save->ny = nny; check_grid_size(nnx, nny); save->npnts_out = n_out = nnx*nny; ref_lon = x0; ref_lat = y0; i = new_grid_lambertc(nnx, nny, ref_lon, ref_lat, latin1, latin2, lov, lad, r_maj, r_min, dx, dy, &x0, &y0); if (i) fatal_error_i("new_grid_lambertc: error code 
%d", i); // make a new section 3 s = sec3_lc(lov, lad, latin1, latin2, proj, nnx, x0, dx, nny, y0, dy, sec); // save new section 3 i = (int) uint4(s); // size of section 3 for (j = 0; j < i; j++) save->sec3[j] = s[j]; // make gdt new_sec[1] = blank_sec1; // add center info new_sec[3] = save->sec3; if (mk_gdt(new_sec, &(save->gdtnum_out), &(save->gdt_out[0]), &(save->gdt_out_size) )) fatal_error("new_grid: encoding output gdt",""); // save radius of earth, to show sec3 and kgds has been done save->radius_major = r_maj; save->radius_minor = r_min; } } if (output_order != raw) fatal_error("new_grid: must be in raw output order",""); i = getName(sec, mode, NULL, name, NULL, NULL); is_u = is_v = 0; for (j = 0; vectors[j] != NULL; j++) { if (strcmp(name,vectors[j]) == 0) { if (j % 2 == 0) is_u = 1; else is_v = 1; break; } } // check if V matches expectation if (is_v && (save->has_u == 0 || (same_sec0(sec,save->clone_sec) != 1 || same_sec1(sec,save->clone_sec) != 1 || same_sec3(sec,save->clone_sec) != 1 || same_sec4(sec,save->clone_sec) != 1) )) { fprintf(stderr,"-new_grid: %s doesn't pair with previous vector field, field ignored\n", name); return 0; } // if U field - to sec if (is_u) { if (save->has_u > 0) { fprintf(stderr,"-new_grid: missing V, %s not interpolated\n",save->name); free(save->u_val); free_sec(save->clone_sec); } copy_sec(sec, save->clone_sec); copy_data(data,ndata,&(save->u_val)); GB2_ParmNum(save->clone_sec) = GB2_ParmNum(sec) + 1; save->has_u = 1; strncpy(save->name, name,NAMELEN-1); save->name[NAMELEN-2]=0; return 0; } // at this point will call polates with either a scalar or vector n_out = save->npnts_out; nnx = save->nx; nny = save->ny; km = 1; // only one scalar or vector field gdt_in_size = sizeof(gdt_in) / sizeof(gdt_in[0]); if (mk_gdt(sec, &gdtnum_in, &(gdt_in[0]), &gdt_in_size )) fatal_error("new_grid: encoding input gdt",""); data_in = (double *) malloc((1 + (is_v != 0)) * sizeof(double) * (size_t) ndata); bitmap = (unsigned char *) 
malloc(sizeof(unsigned char) * (size_t) ndata); if (data_in == NULL || bitmap == NULL) fatal_error("new_grid: memory allocation problem",""); /* data format for ipolates, calling double precision ip2lib double precision data_in[0..npts-1] for scalar double precision data_in[0..2*npts-1] for vector ibi = 0 if bitmap[] is not used (all defined) 1 if bitmap[] is used */ ibi = 0; if (is_v) { #pragma omp parallel for private(i) reduction(|:ibi) for (i = 0; i < ndata; i++) { if (DEFINED_VAL(data[i]) && DEFINED_VAL(save->u_val[i])) { data_in[i] = save->u_val[i]; data_in[i+ndata] = data[i]; bitmap[i] = 1; } else { data_in[i] = data_in[i + ndata] = 0.0; ibi = ibi | 1; bitmap[i] = 0; } } if (mode == 98) fprintf(stderr," UV interpolation %s , %s\n", save->name, name); } else { #pragma omp parallel for private(i) reduction(|:ibi) for (i = 0; i < ndata; i++) { if (DEFINED_VAL(data[i])) { data_in[i] = data[i]; bitmap[i] = 1; } else { data_in[i] = 0.0; ibi = ibi | 1; bitmap[i] = 0; } } } // interpolate tmp_interpol_type = interpol_type; if (interpol_type == 4 && ibi == 1) { fprintf(stderr,"new_grid: undef values, spectral interpolation changed to binlinear\n"); tmp_interpol_type = 0; } ftn_npnts = (int) ndata; ftn_nout = (int) n_out; if (is_v) { IPOLATEV(&tmp_interpol_type, ipopt, &gdtnum_in, &(gdt_in[0]), &gdt_in_size, &(save->gdtnum_out), &(save->gdt_out[0]), &(save->gdt_out_size), &ftn_npnts, &ftn_nout, &km, &ibi, bitmap, data_in, data_in+ndata, &n_out, save->rlat,save->rlon,save->crot,save->srot, &ibo, save->bitmap_out, save->data_out, save->data_out + n_out, &iret); } else { IPOLATES(&tmp_interpol_type, ipopt, &gdtnum_in, &(gdt_in[0]), &gdt_in_size, &(save->gdtnum_out), &(save->gdt_out[0]), &(save->gdt_out_size), &ftn_npnts, &ftn_nout, &km, &ibi, bitmap, data_in, &n_out, save->rlat,save->rlon, &ibo, save->bitmap_out, save->data_out, &iret); } if (iret != 0) { if (iret == 2) fatal_error("IPOLATES failed, unrecognized input grid or no grid point of output grid is in input 
grid",""); if (iret == 3) fatal_error("IPOLATES failed, unrecognized output grid",""); if (iret == 41 && tmp_interpol_type == 4) fatal_error("IPOLATES failed, non-global grid for spectral interpolation",""); fatal_error_i("IPOLATES failed, error %d",iret); } // now to write out the grib file for (i = 0; i < 8; i++) new_sec[i] = sec[i]; new_sec[3] = save->sec3; // write scalar or U if (is_v == 1) { // change V -> U GB2_ParmNum(new_sec) = GB2_ParmNum(new_sec) - 1; } if (ibo == 1) { // has a bitmap #pragma omp parallel for private(i) for (i = 0; i < n_out; i++) { save->data_wrt[i] = save->bitmap_out[i] == 0 ? UNDEFINED : save->data_out[i]; } } else { #pragma omp parallel for private(i) for (i = 0; i < n_out; i++) { save->data_wrt[i] = save->data_out[i]; } } grib_wrt(new_sec, save->data_wrt, n_out, nnx, nny, use_scale, dec_scale, bin_scale, wanted_bits, max_bits, grib_type, &(save->out)); // write V if necessary if (is_v == 1) { // vector GB2_ParmNum(new_sec) = GB2_ParmNum(new_sec) + 1; if (ibo == 1) { // has a bitmap #pragma omp parallel for private(i) for (i = 0; i < n_out; i++) { save->data_wrt[i] = save->bitmap_out[i] == 0 ? 
UNDEFINED : save->data_out[i+n_out]; } } else { #pragma omp parallel for private(i) for (i = 0; i < n_out; i++) { save->data_wrt[i] = save->data_out[i+n_out]; } } grib_wrt(new_sec, save->data_wrt, n_out, nnx, nny, use_scale, dec_scale, bin_scale, wanted_bits, max_bits, grib_type, &(save->out)); } if (flush_mode) fflush_file(&(save->out)); free(data_in); free(bitmap); if (is_v != 0) { save->has_u = 0; free(save->u_val); free_sec(save->clone_sec); } } return 0; } #endif #if USE_IPOLATES == 0 int f_new_grid_interpolation(ARG1) { fprintf(stderr,"IPOLATES package is not installed\n"); return 1; } int f_new_grid_ipopt(ARG1) { fprintf(stderr,"IPOLATES package is not installed\n"); return 1; } int f_new_grid(ARG4) { fprintf(stderr,"IPOLATES package is not installed\n"); return 1; } int f_new_grid_winds(ARG1) { fprintf(stderr,"IPOLATES package is not installed\n"); return 1; } #endif
internal-parallel.h
/*
 * returns index of the last item satisfying
 *    [item] < P,
 *
 * returns -1 if no item satisfies it (i.e. the first item is already >= P)
 *
 * Items are compared by their radix key: d->radix() extracts the key
 * (d->rsize bytes) and d->compar() orders keys.
 * Assumes base[] is sorted by that key -- required for the binary
 * search; TODO confirm at call sites.
 */
static ptrdiff_t
_bsearch_last_lt(void * P, void * base, size_t nmemb,
        struct crstruct * d)
{
    if (nmemb == 0) return -1;

    char tmpradix[d->rsize];
    ptrdiff_t left = 0;
    ptrdiff_t right = nmemb - 1;

    /* quick reject: first item already >= P, so nothing is < P */
    d->radix((char*) base, tmpradix, d->arg);
    if(d->compar(tmpradix, P, d->rsize) >= 0) {
        return - 1;
    }
    /* quick accept: last item < P, so every item is < P */
    d->radix((char*) base + right * d->size, tmpradix, d->arg);
    if(d->compar(tmpradix, P, d->rsize) < 0) {
        return nmemb - 1;
    }

    /* left <= i <= right */
    /* invariant: [left] < P <= [right] */
    while(right > left + 1) {
        ptrdiff_t mid = ((right - left + 1) >> 1) + left;
        d->radix((char*) base + mid * d->size, tmpradix, d->arg);
        /* if [mid] < P , move left to mid */
        /* if [mid] >= P , move right to mid */
        int c1 = d->compar(tmpradix, P, d->rsize);
        if(c1 < 0) {
            left = mid;
        } else {
            right = mid;
        }
    }
    return left;
}

/*
 * returns index of the last item satisfying
 *    [item] <= P,
 *
 * returns -1 if no item satisfies it (i.e. the first item is already > P)
 *
 * Same contract as _bsearch_last_lt, with <= instead of <.
 */
static ptrdiff_t
_bsearch_last_le(void * P, void * base, size_t nmemb,
        struct crstruct * d)
{
    if (nmemb == 0) return -1;

    char tmpradix[d->rsize];
    ptrdiff_t left = 0;
    ptrdiff_t right = nmemb - 1;

    /* quick reject: first item already > P */
    d->radix((char*) base, tmpradix, d->arg);
    if(d->compar(tmpradix, P, d->rsize) > 0) {
        return -1;
    }
    /* quick accept: last item <= P */
    d->radix((char*) base + right * d->size, tmpradix, d->arg);
    if(d->compar(tmpradix, P, d->rsize) <= 0) {
        return nmemb - 1;
    }

    /* left <= i <= right */
    /* invariant: [left] <= P < [right] */
    while(right > left + 1) {
        ptrdiff_t mid = ((right - left + 1) >> 1) + left;
        d->radix((char*) base + mid * d->size, tmpradix, d->arg);
        /* if [mid] <= P , move left to mid */
        /* if [mid] > P , move right to mid */
        int c1 = d->compar(tmpradix, P, d->rsize);
        if(c1 <= 0) {
            left = mid;
        } else {
            right = mid;
        }
    }
    return left;
}

/*
 * do a histogram of mybase, based on bins defined in P.
 * P is an array of radix of length Plength,
 * myCLT, myCLE are of length Plength + 2
 *
 * myCLT[i + 1] is the count of items less than P[i]
 * myCLE[i + 1] is the count of items less than or equal to P[i]
 *
 * myCLT[0] is always 0
 * myCLT[Plength + 1] is always mynmemb
 *
 */
static void _histogram(char * P, int Plength, void * mybase, size_t mynmemb,
        ptrdiff_t * myCLT, ptrdiff_t * myCLE,
        struct crstruct * d) {
    int it;

    myCLT[0] = 0;
    myCLE[0] = 0;
    /* each bin edge is located by binary search; +1 converts the
       "index of last matching item" into a count */
    for(it = 0; it < Plength; it ++) {
        myCLT[it + 1] = _bsearch_last_lt(P + it * d->rsize, mybase, mynmemb, d) + 1;
        myCLE[it + 1] = _bsearch_last_le(P + it * d->rsize, mybase, mynmemb, d) + 1;
    }
    /* after the loop it == Plength, so this fills the final slot
       (index Plength + 1) of both arrays */
    myCLT[it + 1] = mynmemb;
    myCLE[it + 1] = mynmemb;
}

#if 0
/*
 * solve for the communication layout based on
 *
 * C: the desired number of items per task
 * GL_CLT[t,i+1]: the offset of lt P[i] in task t
 * GL_CLE[t,i+1]: the offset of le P[i] in task t
 *
 * the result is saved in
 *
 * GL_C[t, i]: the offset of sending to task i in task t.
 *
 * this routine requires GL_ to scale with NTask * NTask;
 * won't work with 1,000 + ranks.
 *
 * (disabled: kept for reference only)
 */
static void _solve_for_layout (
        int NTask,
        ptrdiff_t * C,
        ptrdiff_t * GL_CLT,
        ptrdiff_t * GL_CLE,
        ptrdiff_t * GL_C) {
    int NTask1 = NTask + 1;
    int i, j;

    /* first assume we just send according to GL_CLT */
    for(i = 0; i < NTask + 1; i ++) {
        for(j = 0; j < NTask; j ++) {
            GL_C[j * NTask1 + i] = GL_CLT[j * NTask1 + i];
        }
    }

    /* Solve for each receiving task i
     *
     * this solves for GL_C[..., i + 1], which depends on GL_C[..., i]
     *
     * and we have GL_C[..., 0] == 0 by definition.
     *
     * this cannot be done in parallel wrt i because of the dependency.
     *
     * a solution is guaranteed because GL_CLE and GL_CLT
     * brackets the total counts C (we've found it with the
     * iterative counting.
     *
     */
    for(i = 0; i < NTask; i ++) {
        ptrdiff_t sure = 0;

        /* how many will I surely receive? */
        for(j = 0; j < NTask; j ++) {
            ptrdiff_t sendcount = GL_C[j * NTask1 + i + 1] - GL_C[j * NTask1 + i];
            sure += sendcount;
        }
        /* let's see if we have enough */
        ptrdiff_t deficit = C[i + 1] - C[i] - sure;

        for(j = 0; j < NTask; j ++) {
            /* deficit solved */
            if(deficit == 0) break;
            if(deficit < 0) {
                fprintf(stderr, "serious bug: more items than there should be: deficit=%ld\n", deficit);
                abort();
            }
            /* how much task j can supply ? */
            ptrdiff_t supply = GL_CLE[j * NTask1 + i + 1] - GL_C[j * NTask1 + i + 1];
            if(supply < 0) {
                fprintf(stderr, "serious bug: less items than there should be: supply =%ld\n", supply);
                abort();
            }
            if(supply <= deficit) {
                GL_C[j * NTask1 + i + 1] += supply;
                deficit -= supply;
            } else {
                GL_C[j * NTask1 + i + 1] += deficit;
                deficit = 0;
            }
        }
    }
#if 0
    for(i = 0; i < NTask; i ++) {
        for(j = 0; j < NTask + 1; j ++) {
            printf("%d %d %d, ",
                GL_CLT[i * NTask1 + j],
                GL_C[i * NTask1 + j],
                GL_CLE[i * NTask1 + j]);
        }
        printf("\n");
    }
#endif
}
#endif

/* state of the parallel iterative bisection over the Plength splitters:
   per-splitter Pleft/Pright brackets plus 'stable'/'narrow' flags */
struct piter {
    int * stable;       /* splitter i has converged */
    int * narrow;       /* bracket too small to bisect; test Pright next */
    int Plength;
    char * Pleft;       /* lower bracket, Plength keys of d->rsize bytes */
    char * Pright;      /* upper bracket */
    struct crstruct * d;
};

/* initialize all Plength brackets to the global [Pmin, Pmax] range.
   NOTE(review): calloc results are not checked for NULL here. */
static void piter_init(struct piter * pi,
        char * Pmin, char * Pmax, int Plength,
        struct crstruct * d) {
    pi->stable = calloc(Plength, sizeof(int));
    pi->narrow = calloc(Plength, sizeof(int));
    pi->d = d;
    pi->Pleft = calloc(Plength, d->rsize);
    pi->Pright = calloc(Plength, d->rsize);
    pi->Plength = Plength;

    int i;
    for(i = 0; i < pi->Plength; i ++) {
        memcpy(&pi->Pleft[i * d->rsize], Pmin, d->rsize);
        memcpy(&pi->Pright[i * d->rsize], Pmax, d->rsize);
    }
}

/* release all buffers allocated by piter_init */
static void piter_destroy(struct piter * pi) {
    free(pi->stable);
    free(pi->narrow);
    free(pi->Pleft);
    free(pi->Pright);
}

/*
 * this will bisect the left / right in piter.
 * note that piter goes [left, right], thus we need
 * to maintain an internal status to make sure we go over
 * the additional 'right]'.
 * (usual bisect range is
 * '[left, right)' )
 */
static void piter_bisect(struct piter * pi, char * P) {
    struct crstruct * d = pi->d;
    int i;
    for(i = 0; i < pi->Plength; i ++) {
        if(pi->stable[i]) continue;       /* splitter already converged */
        if(pi->narrow[i]) {
            /* The last iteration, test Pright directly */
            memcpy(&P[i * d->rsize],
                &pi->Pright[i * d->rsize],
                d->rsize);
            pi->stable[i] = 1;
        } else {
            /* ordinary iteration: midpoint of [Pleft, Pright] */
            d->bisect(&P[i * d->rsize],
                    &pi->Pleft[i * d->rsize],
                    &pi->Pright[i * d->rsize], d->rsize);
            /* in case the bisect can't move P beyond left,
             * the range is too small, so we set flag narrow,
             * and next iteration we will directly test Pright */
            if(d->compar(&P[i * d->rsize],
                &pi->Pleft[i * d->rsize], d->rsize) == 0) {
                pi->narrow[i] = 1;
            }
        }
#if 0
        printf("bisect %d %u %u %u\n", i,
                *(int*) &P[i * d->rsize],
                *(int*) &pi->Pleft[i * d->rsize],
                *(int*) &pi->Pright[i * d->rsize]);
#endif
    }
}

/* return 1 when every splitter has been marked stable, 0 otherwise */
static int piter_all_done(struct piter * pi) {
    int i;
    int done = 1;
#if 0
#pragma omp single
    for(i = 0; i < pi->Plength; i ++) {
        printf("P %d stable %d narrow %d\n",
                i, pi->stable[i], pi->narrow[i]);
    }
#endif
    for(i = 0; i < pi->Plength; i ++) {
        if(!pi->stable[i]) {
            done = 0;
            break;
        }
    }
    return done;
}

/*
 * bisection acceptance test.
 *
 * test if the counts satisfies CLT < C <= CLE.
 * move Pleft / Pright accordingly.
 */
static void piter_accept(struct piter * pi, char * P,
        ptrdiff_t * C, ptrdiff_t * CLT, ptrdiff_t * CLE) {
    struct crstruct * d = pi->d;
    int i;
#if 0
    for(i = 0; i < pi->Plength + 1; i ++) {
        printf("counts %d LT %ld C %ld LE %ld\n",
                i, CLT[i], C[i], CLE[i]);
    }
#endif
    for(i = 0; i < pi->Plength; i ++) {
        if( CLT[i + 1] < C[i + 1] && C[i + 1] <= CLE[i + 1]) {
            /* target count bracketed: splitter i converged */
            pi->stable[i] = 1;
            continue;
        } else {
            if(CLT[i + 1] >= C[i + 1]) {
                /* P[i] is too big */
                memcpy(&pi->Pright[i * d->rsize], &P[i * d->rsize], d->rsize);
            } else {
                /* P[i] is too small */
                memcpy(&pi->Pleft[i * d->rsize], &P[i * d->rsize], d->rsize);
            }
        }
    }
}
team.c
/* Copyright (C) 2005-2018 Free Software Foundation, Inc. Contributed by Richard Henderson <rth@redhat.com>. This file is part of the GNU Offloading and Multi Processing Library (libgomp). Libgomp is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ /* This file handles the maintainence of threads in response to team creation and termination. */ #include "libgomp.h" #include "pool.h" #include <stdlib.h> #include <string.h> #ifdef LIBGOMP_USE_PTHREADS /* This attribute contains PTHREAD_CREATE_DETACHED. */ pthread_attr_t gomp_thread_attr; /* This key is for the thread destructor. */ pthread_key_t gomp_thread_destructor; /* This is the libgomp per-thread data structure. */ #if defined HAVE_TLS || defined USE_EMUTLS __thread struct gomp_thread gomp_tls_data; #else pthread_key_t gomp_tls_key; #endif /* This structure is used to communicate across pthread_create. */ struct gomp_thread_start_data { void (*fn) (void *); void *fn_data; struct gomp_team_state ts; struct gomp_task *task; struct gomp_thread_pool *thread_pool; unsigned int place; bool nested; }; /* This function is a pthread_create entry point. 
   This contains the idle loop in which a thread waits
   to be called up to become part of a team.  */

static void *
gomp_thread_start (void *xdata)
{
  struct gomp_thread_start_data *data = xdata;
  struct gomp_thread *thr;
  struct gomp_thread_pool *pool;
  void (*local_fn) (void *);
  void *local_data;

#if defined HAVE_TLS || defined USE_EMUTLS
  thr = &gomp_tls_data;
#else
  /* No TLS: keep the per-thread data on this thread's stack and publish
     it through the pthread key.  */
  struct gomp_thread local_thr;
  thr = &local_thr;
  pthread_setspecific (gomp_tls_key, thr);
#endif
  gomp_sem_init (&thr->release, 0);

  /* Extract what we need from data.  */
  local_fn = data->fn;
  local_data = data->fn_data;
  thr->thread_pool = data->thread_pool;
  thr->ts = data->ts;
  thr->task = data->task;
  thr->place = data->place;

  /* Let the master release this thread individually via its semaphore.  */
  thr->ts.team->ordered_release[thr->ts.team_id] = &thr->release;

  /* Make thread pool local. */
  pool = thr->thread_pool;

  if (data->nested)
    {
      /* Nested team member: run exactly one parallel region, then exit.  */
      struct gomp_team *team = thr->ts.team;
      struct gomp_task *task = thr->task;

      gomp_barrier_wait (&team->barrier);

      local_fn (local_data);
      gomp_team_barrier_wait_final (&team->barrier);
      gomp_finish_task (task);
      gomp_barrier_wait_last (&team->barrier);
    }
  else
    {
      /* Pool thread: loop picking up work each time the dock barrier
	 releases, until handed a NULL fn (pool shutdown).  */
      pool->threads[thr->ts.team_id] = thr;

      gomp_simple_barrier_wait (&pool->threads_dock);
      do
	{
	  struct gomp_team *team = thr->ts.team;
	  struct gomp_task *task = thr->task;

	  local_fn (local_data);
	  gomp_team_barrier_wait_final (&team->barrier);
	  gomp_finish_task (task);

	  gomp_simple_barrier_wait (&pool->threads_dock);

	  /* The master stored the next fn/data in thr before releasing
	     the dock barrier.  */
	  local_fn = thr->fn;
	  local_data = thr->data;
	  thr->fn = NULL;
	}
      while (local_fn);
    }

  gomp_sem_destroy (&thr->release);
  thr->thread_pool = NULL;
  thr->task = NULL;
  return NULL;
}
#endif

/* If the calling thread is not itself in a team, try to recycle the
   pool's cached last team, provided it was built for the same number
   of threads.  Returns NULL when no reusable team is available.  */

static inline struct gomp_team *
get_last_team (unsigned nthreads)
{
  struct gomp_thread *thr = gomp_thread ();
  if (thr->ts.team == NULL)
    {
      struct gomp_thread_pool *pool = gomp_get_thread_pool (thr, nthreads);
      struct gomp_team *last_team = pool->last_team;
      if (last_team != NULL && last_team->nthreads == nthreads)
	{
	  pool->last_team = NULL;
	  return last_team;
	}
    }
  return NULL;
}

/* Create a new team data structure.
   NTHREADS is the number of threads in the team; a cached team of the
   same size is reused when available.  */

struct gomp_team *
gomp_new_team (unsigned nthreads)
{
  struct gomp_team *team;
  int i;

  team = get_last_team (nthreads);
  if (team == NULL)
    {
      /* Allocate the team with per-thread ordered_release and
         implicit_task arrays appended in one block.  */
      size_t extra = sizeof (team->ordered_release[0])
		     + sizeof (team->implicit_task[0]);
      team = gomp_malloc (sizeof (*team) + nthreads * extra);

#ifndef HAVE_SYNC_BUILTINS
      gomp_mutex_init (&team->work_share_list_free_lock);
#endif
      gomp_barrier_init (&team->barrier, nthreads);
      gomp_mutex_init (&team->task_lock);
      team->nthreads = nthreads;
    }

  team->work_share_chunk = 8;
#ifdef HAVE_SYNC_BUILTINS
  team->single_count = 0;
#endif
  team->work_shares_to_free = &team->work_shares[0];
  gomp_init_work_share (&team->work_shares[0], false, nthreads);
  team->work_shares[0].next_alloc = NULL;
  team->work_share_list_free = NULL;
  /* Chain the remaining inline work shares (indices 1..7) into the
     allocation free list; the loop leaves i == 7 for the terminator.  */
  team->work_share_list_alloc = &team->work_shares[1];
  for (i = 1; i < 7; i++)
    team->work_shares[i].next_free = &team->work_shares[i + 1];
  team->work_shares[i].next_free = NULL;

  gomp_sem_init (&team->master_release, 0);
  /* ordered_release points into the tail storage allocated above,
     right after implicit_task[nthreads].  */
  team->ordered_release = (void *) &team->implicit_task[nthreads];
  team->ordered_release[0] = &team->master_release;

  priority_queue_init (&team->task_queue);
  team->task_count = 0;
  team->task_queued_count = 0;
  team->task_running_count = 0;
  team->work_share_cancelled = 0;
  team->team_cancelled = 0;
  return team;
}

/* Free a team data structure.
*/ static void free_team (struct gomp_team *team) { #ifndef HAVE_SYNC_BUILTINS gomp_mutex_destroy (&team->work_share_list_free_lock); #endif gomp_barrier_destroy (&team->barrier); gomp_mutex_destroy (&team->task_lock); priority_queue_free (&team->task_queue); free (team); } static void gomp_free_pool_helper (void *thread_pool) { struct gomp_thread *thr = gomp_thread (); struct gomp_thread_pool *pool = (struct gomp_thread_pool *) thread_pool; gomp_simple_barrier_wait_last (&pool->threads_dock); gomp_sem_destroy (&thr->release); thr->thread_pool = NULL; thr->task = NULL; #ifdef LIBGOMP_USE_PTHREADS pthread_exit (NULL); #elif defined(__nvptx__) asm ("exit;"); #else #error gomp_free_pool_helper must terminate the thread #endif } /* Free a thread pool and release its threads. */ void gomp_free_thread (void *arg __attribute__((unused))) { struct gomp_thread *thr = gomp_thread (); struct gomp_thread_pool *pool = thr->thread_pool; if (pool) { if (pool->threads_used > 0) { int i; for (i = 1; i < pool->threads_used; i++) { struct gomp_thread *nthr = pool->threads[i]; nthr->fn = gomp_free_pool_helper; nthr->data = pool; } /* This barrier undocks threads docked on pool->threads_dock. */ gomp_simple_barrier_wait (&pool->threads_dock); /* And this waits till all threads have called gomp_barrier_wait_last in gomp_free_pool_helper. */ gomp_simple_barrier_wait (&pool->threads_dock); /* Now it is safe to destroy the barrier and free the pool. 
*/ gomp_simple_barrier_destroy (&pool->threads_dock); #ifdef HAVE_SYNC_BUILTINS __sync_fetch_and_add (&gomp_managed_threads, 1L - pool->threads_used); #else gomp_mutex_lock (&gomp_managed_threads_lock); gomp_managed_threads -= pool->threads_used - 1L; gomp_mutex_unlock (&gomp_managed_threads_lock); #endif } if (pool->last_team) free_team (pool->last_team); #ifndef __nvptx__ free (pool->threads); free (pool); #endif thr->thread_pool = NULL; } if (thr->ts.level == 0 && __builtin_expect (thr->ts.team != NULL, 0)) gomp_team_end (); if (thr->task != NULL) { struct gomp_task *task = thr->task; gomp_end_task (); free (task); } } /* Launch a team. */ #ifdef LIBGOMP_USE_PTHREADS void gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads, unsigned flags, struct gomp_team *team) { struct gomp_thread_start_data *start_data; struct gomp_thread *thr, *nthr; struct gomp_task *task; struct gomp_task_icv *icv; bool nested; struct gomp_thread_pool *pool; unsigned i, n, old_threads_used = 0; pthread_attr_t thread_attr, *attr; unsigned long nthreads_var; char bind, bind_var; unsigned int s = 0, rest = 0, p = 0, k = 0; unsigned int affinity_count = 0; struct gomp_thread **affinity_thr = NULL; thr = gomp_thread (); nested = thr->ts.level; pool = thr->thread_pool; task = thr->task; icv = task ? &task->icv : &gomp_global_icv; if (__builtin_expect (gomp_places_list != NULL, 0) && thr->place == 0) gomp_init_affinity (); /* Always save the previous state, even if this isn't a nested team. In particular, we should save any work share state from an outer orphaned work share construct. 
*/ team->prev_ts = thr->ts; thr->ts.team = team; thr->ts.team_id = 0; ++thr->ts.level; if (nthreads > 1) ++thr->ts.active_level; thr->ts.work_share = &team->work_shares[0]; thr->ts.last_work_share = NULL; #ifdef HAVE_SYNC_BUILTINS thr->ts.single_count = 0; #endif thr->ts.static_trip = 0; thr->task = &team->implicit_task[0]; nthreads_var = icv->nthreads_var; if (__builtin_expect (gomp_nthreads_var_list != NULL, 0) && thr->ts.level < gomp_nthreads_var_list_len) nthreads_var = gomp_nthreads_var_list[thr->ts.level]; bind_var = icv->bind_var; if (bind_var != omp_proc_bind_false && (flags & 7) != omp_proc_bind_false) bind_var = flags & 7; bind = bind_var; if (__builtin_expect (gomp_bind_var_list != NULL, 0) && thr->ts.level < gomp_bind_var_list_len) bind_var = gomp_bind_var_list[thr->ts.level]; gomp_init_task (thr->task, task, icv); team->implicit_task[0].icv.nthreads_var = nthreads_var; team->implicit_task[0].icv.bind_var = bind_var; if (nthreads == 1) return; i = 1; if (__builtin_expect (gomp_places_list != NULL, 0)) { /* Depending on chosen proc_bind model, set subpartition for the master thread and initialize helper variables P and optionally S, K and/or REST used by later place computation for each additional thread. */ p = thr->place - 1; switch (bind) { case omp_proc_bind_true: case omp_proc_bind_close: if (nthreads > thr->ts.place_partition_len) { /* T > P. S threads will be placed in each place, and the final REM threads placed one by one into the already occupied places. */ s = nthreads / thr->ts.place_partition_len; rest = nthreads % thr->ts.place_partition_len; } else s = 1; k = 1; break; case omp_proc_bind_master: /* Each thread will be bound to master's place. */ break; case omp_proc_bind_spread: if (nthreads <= thr->ts.place_partition_len) { /* T <= P. 
Each subpartition will have in between s and s+1 places (subpartitions starting at or after rest will have s places, earlier s+1 places), each thread will be bound to the first place in its subpartition (except for the master thread that can be bound to another place in its subpartition). */ s = thr->ts.place_partition_len / nthreads; rest = thr->ts.place_partition_len % nthreads; rest = (s + 1) * rest + thr->ts.place_partition_off; if (p < rest) { p -= (p - thr->ts.place_partition_off) % (s + 1); thr->ts.place_partition_len = s + 1; } else { p -= (p - rest) % s; thr->ts.place_partition_len = s; } thr->ts.place_partition_off = p; } else { /* T > P. Each subpartition will have just a single place and we'll place between s and s+1 threads into each subpartition. */ s = nthreads / thr->ts.place_partition_len; rest = nthreads % thr->ts.place_partition_len; thr->ts.place_partition_off = p; thr->ts.place_partition_len = 1; k = 1; } break; } } else bind = omp_proc_bind_false; /* We only allow the reuse of idle threads for non-nested PARALLEL regions. This appears to be implied by the semantics of threadprivate variables, but perhaps that's reading too much into things. Certainly it does prevent any locking problems, since only the initial program thread will modify gomp_threads. */ if (!nested) { old_threads_used = pool->threads_used; if (nthreads <= old_threads_used) n = nthreads; else if (old_threads_used == 0) { n = 0; gomp_simple_barrier_init (&pool->threads_dock, nthreads); } else { n = old_threads_used; /* Increase the barrier threshold to make sure all new threads arrive before the team is released. */ gomp_simple_barrier_reinit (&pool->threads_dock, nthreads); } /* Not true yet, but soon will be. We're going to release all threads from the dock, and those that aren't part of the team will exit. */ pool->threads_used = nthreads; /* If necessary, expand the size of the gomp_threads array. 
It is expected that changes in the number of threads are rare, thus we make no effort to expand gomp_threads_size geometrically. */ if (nthreads >= pool->threads_size) { pool->threads_size = nthreads + 1; pool->threads = gomp_realloc (pool->threads, pool->threads_size * sizeof (struct gomp_thread_data *)); } /* Release existing idle threads. */ for (; i < n; ++i) { unsigned int place_partition_off = thr->ts.place_partition_off; unsigned int place_partition_len = thr->ts.place_partition_len; unsigned int place = 0; if (__builtin_expect (gomp_places_list != NULL, 0)) { switch (bind) { case omp_proc_bind_true: case omp_proc_bind_close: if (k == s) { ++p; if (p == (team->prev_ts.place_partition_off + team->prev_ts.place_partition_len)) p = team->prev_ts.place_partition_off; k = 1; if (i == nthreads - rest) s = 1; } else ++k; break; case omp_proc_bind_master: break; case omp_proc_bind_spread: if (k == 0) { /* T <= P. */ if (p < rest) p += s + 1; else p += s; if (p == (team->prev_ts.place_partition_off + team->prev_ts.place_partition_len)) p = team->prev_ts.place_partition_off; place_partition_off = p; if (p < rest) place_partition_len = s + 1; else place_partition_len = s; } else { /* T > P. 
*/ if (k == s) { ++p; if (p == (team->prev_ts.place_partition_off + team->prev_ts.place_partition_len)) p = team->prev_ts.place_partition_off; k = 1; if (i == nthreads - rest) s = 1; } else ++k; place_partition_off = p; place_partition_len = 1; } break; } if (affinity_thr != NULL || (bind != omp_proc_bind_true && pool->threads[i]->place != p + 1) || pool->threads[i]->place <= place_partition_off || pool->threads[i]->place > (place_partition_off + place_partition_len)) { unsigned int l; if (affinity_thr == NULL) { unsigned int j; if (team->prev_ts.place_partition_len > 64) affinity_thr = gomp_malloc (team->prev_ts.place_partition_len * sizeof (struct gomp_thread *)); else affinity_thr = gomp_alloca (team->prev_ts.place_partition_len * sizeof (struct gomp_thread *)); memset (affinity_thr, '\0', team->prev_ts.place_partition_len * sizeof (struct gomp_thread *)); for (j = i; j < old_threads_used; j++) { if (pool->threads[j]->place > team->prev_ts.place_partition_off && (pool->threads[j]->place <= (team->prev_ts.place_partition_off + team->prev_ts.place_partition_len))) { l = pool->threads[j]->place - 1 - team->prev_ts.place_partition_off; pool->threads[j]->data = affinity_thr[l]; affinity_thr[l] = pool->threads[j]; } pool->threads[j] = NULL; } if (nthreads > old_threads_used) memset (&pool->threads[old_threads_used], '\0', ((nthreads - old_threads_used) * sizeof (struct gomp_thread *))); n = nthreads; affinity_count = old_threads_used - i; } if (affinity_count == 0) break; l = p; if (affinity_thr[l - team->prev_ts.place_partition_off] == NULL) { if (bind != omp_proc_bind_true) continue; for (l = place_partition_off; l < place_partition_off + place_partition_len; l++) if (affinity_thr[l - team->prev_ts.place_partition_off] != NULL) break; if (l == place_partition_off + place_partition_len) continue; } nthr = affinity_thr[l - team->prev_ts.place_partition_off]; affinity_thr[l - team->prev_ts.place_partition_off] = (struct gomp_thread *) nthr->data; affinity_count--; 
pool->threads[i] = nthr; } else nthr = pool->threads[i]; place = p + 1; } else nthr = pool->threads[i]; nthr->ts.team = team; nthr->ts.work_share = &team->work_shares[0]; nthr->ts.last_work_share = NULL; nthr->ts.team_id = i; nthr->ts.level = team->prev_ts.level + 1; nthr->ts.active_level = thr->ts.active_level; nthr->ts.place_partition_off = place_partition_off; nthr->ts.place_partition_len = place_partition_len; #ifdef HAVE_SYNC_BUILTINS nthr->ts.single_count = 0; #endif nthr->ts.static_trip = 0; nthr->task = &team->implicit_task[i]; nthr->place = place; gomp_init_task (nthr->task, task, icv); team->implicit_task[i].icv.nthreads_var = nthreads_var; team->implicit_task[i].icv.bind_var = bind_var; nthr->fn = fn; nthr->data = data; team->ordered_release[i] = &nthr->release; } if (__builtin_expect (affinity_thr != NULL, 0)) { /* If AFFINITY_THR is non-NULL just because we had to permute some threads in the pool, but we've managed to find exactly as many old threads as we'd find without affinity, we don't need to handle this specially anymore. */ if (nthreads <= old_threads_used ? (affinity_count == old_threads_used - nthreads) : (i == old_threads_used)) { if (team->prev_ts.place_partition_len > 64) free (affinity_thr); affinity_thr = NULL; affinity_count = 0; } else { i = 1; /* We are going to compute the places/subpartitions again from the beginning. So, we need to reinitialize vars modified by the switch (bind) above inside of the loop, to the state they had after the initial switch (bind). */ switch (bind) { case omp_proc_bind_true: case omp_proc_bind_close: if (nthreads > thr->ts.place_partition_len) /* T > P. S has been changed, so needs to be recomputed. */ s = nthreads / thr->ts.place_partition_len; k = 1; p = thr->place - 1; break; case omp_proc_bind_master: /* No vars have been changed. */ break; case omp_proc_bind_spread: p = thr->ts.place_partition_off; if (k != 0) { /* T > P. 
*/ s = nthreads / team->prev_ts.place_partition_len; k = 1; } break; } /* Increase the barrier threshold to make sure all new threads and all the threads we're going to let die arrive before the team is released. */ if (affinity_count) gomp_simple_barrier_reinit (&pool->threads_dock, nthreads + affinity_count); } } if (i == nthreads) goto do_release; } if (__builtin_expect (nthreads + affinity_count > old_threads_used, 0)) { long diff = (long) (nthreads + affinity_count) - (long) old_threads_used; if (old_threads_used == 0) --diff; #ifdef HAVE_SYNC_BUILTINS __sync_fetch_and_add (&gomp_managed_threads, diff); #else gomp_mutex_lock (&gomp_managed_threads_lock); gomp_managed_threads += diff; gomp_mutex_unlock (&gomp_managed_threads_lock); #endif } attr = &gomp_thread_attr; if (__builtin_expect (gomp_places_list != NULL, 0)) { size_t stacksize; pthread_attr_init (&thread_attr); pthread_attr_setdetachstate (&thread_attr, PTHREAD_CREATE_DETACHED); if (! pthread_attr_getstacksize (&gomp_thread_attr, &stacksize)) pthread_attr_setstacksize (&thread_attr, stacksize); attr = &thread_attr; } start_data = gomp_alloca (sizeof (struct gomp_thread_start_data) * (nthreads-i)); /* Launch new threads. */ for (; i < nthreads; ++i) { pthread_t pt; int err; start_data->ts.place_partition_off = thr->ts.place_partition_off; start_data->ts.place_partition_len = thr->ts.place_partition_len; start_data->place = 0; if (__builtin_expect (gomp_places_list != NULL, 0)) { switch (bind) { case omp_proc_bind_true: case omp_proc_bind_close: if (k == s) { ++p; if (p == (team->prev_ts.place_partition_off + team->prev_ts.place_partition_len)) p = team->prev_ts.place_partition_off; k = 1; if (i == nthreads - rest) s = 1; } else ++k; break; case omp_proc_bind_master: break; case omp_proc_bind_spread: if (k == 0) { /* T <= P. 
*/ if (p < rest) p += s + 1; else p += s; if (p == (team->prev_ts.place_partition_off + team->prev_ts.place_partition_len)) p = team->prev_ts.place_partition_off; start_data->ts.place_partition_off = p; if (p < rest) start_data->ts.place_partition_len = s + 1; else start_data->ts.place_partition_len = s; } else { /* T > P. */ if (k == s) { ++p; if (p == (team->prev_ts.place_partition_off + team->prev_ts.place_partition_len)) p = team->prev_ts.place_partition_off; k = 1; if (i == nthreads - rest) s = 1; } else ++k; start_data->ts.place_partition_off = p; start_data->ts.place_partition_len = 1; } break; } start_data->place = p + 1; if (affinity_thr != NULL && pool->threads[i] != NULL) continue; gomp_init_thread_affinity (attr, p); } start_data->fn = fn; start_data->fn_data = data; start_data->ts.team = team; start_data->ts.work_share = &team->work_shares[0]; start_data->ts.last_work_share = NULL; start_data->ts.team_id = i; start_data->ts.level = team->prev_ts.level + 1; start_data->ts.active_level = thr->ts.active_level; #ifdef HAVE_SYNC_BUILTINS start_data->ts.single_count = 0; #endif start_data->ts.static_trip = 0; start_data->task = &team->implicit_task[i]; gomp_init_task (start_data->task, task, icv); team->implicit_task[i].icv.nthreads_var = nthreads_var; team->implicit_task[i].icv.bind_var = bind_var; start_data->thread_pool = pool; start_data->nested = nested; attr = gomp_adjust_thread_attr (attr, &thread_attr); err = pthread_create (&pt, attr, gomp_thread_start, start_data++); if (err != 0) gomp_fatal ("Thread creation failed: %s", strerror (err)); } if (__builtin_expect (attr == &thread_attr, 0)) pthread_attr_destroy (&thread_attr); do_release: if (nested) gomp_barrier_wait (&team->barrier); else gomp_simple_barrier_wait (&pool->threads_dock); /* Decrease the barrier threshold to match the number of threads that should arrive back at the end of this team. The extra threads should be exiting. 
Note that we arrange for this test to never be true for nested teams. If AFFINITY_COUNT is non-zero, the barrier as well as gomp_managed_threads was temporarily set to NTHREADS + AFFINITY_COUNT. For NTHREADS < OLD_THREADS_COUNT, AFFINITY_COUNT if non-zero will be always at least OLD_THREADS_COUNT - NTHREADS. */ if (__builtin_expect (nthreads < old_threads_used, 0) || __builtin_expect (affinity_count, 0)) { long diff = (long) nthreads - (long) old_threads_used; if (affinity_count) diff = -affinity_count; gomp_simple_barrier_reinit (&pool->threads_dock, nthreads); #ifdef HAVE_SYNC_BUILTINS __sync_fetch_and_add (&gomp_managed_threads, diff); #else gomp_mutex_lock (&gomp_managed_threads_lock); gomp_managed_threads += diff; gomp_mutex_unlock (&gomp_managed_threads_lock); #endif } if (__builtin_expect (affinity_thr != NULL, 0) && team->prev_ts.place_partition_len > 64) free (affinity_thr); } #endif /* Terminate the current team. This is only to be called by the master thread. We assume that we must wait for the other threads. */ void gomp_team_end (void) { struct gomp_thread *thr = gomp_thread (); struct gomp_team *team = thr->ts.team; /* This barrier handles all pending explicit threads. As #pragma omp cancel parallel might get awaited count in team->barrier in a inconsistent state, we need to use a different counter here. 
*/ gomp_team_barrier_wait_final (&team->barrier); if (__builtin_expect (team->team_cancelled, 0)) { struct gomp_work_share *ws = team->work_shares_to_free; do { struct gomp_work_share *next_ws = gomp_ptrlock_get (&ws->next_ws); if (next_ws == NULL) gomp_ptrlock_set (&ws->next_ws, ws); gomp_fini_work_share (ws); ws = next_ws; } while (ws != NULL); } else gomp_fini_work_share (thr->ts.work_share); gomp_end_task (); thr->ts = team->prev_ts; if (__builtin_expect (thr->ts.team != NULL, 0)) { #ifdef HAVE_SYNC_BUILTINS __sync_fetch_and_add (&gomp_managed_threads, 1L - team->nthreads); #else gomp_mutex_lock (&gomp_managed_threads_lock); gomp_managed_threads -= team->nthreads - 1L; gomp_mutex_unlock (&gomp_managed_threads_lock); #endif /* This barrier has gomp_barrier_wait_last counterparts and ensures the team can be safely destroyed. */ gomp_barrier_wait (&team->barrier); } if (__builtin_expect (team->work_shares[0].next_alloc != NULL, 0)) { struct gomp_work_share *ws = team->work_shares[0].next_alloc; do { struct gomp_work_share *next_ws = ws->next_alloc; free (ws); ws = next_ws; } while (ws != NULL); } gomp_sem_destroy (&team->master_release); if (__builtin_expect (thr->ts.team != NULL, 0) || __builtin_expect (team->nthreads == 1, 0)) free_team (team); else { struct gomp_thread_pool *pool = thr->thread_pool; if (pool->last_team) free_team (pool->last_team); pool->last_team = team; gomp_release_thread_pool (pool); } } #ifdef LIBGOMP_USE_PTHREADS /* Constructors for this file. 
*/ static void __attribute__((constructor)) initialize_team (void) { #if !defined HAVE_TLS && !defined USE_EMUTLS static struct gomp_thread initial_thread_tls_data; pthread_key_create (&gomp_tls_key, NULL); pthread_setspecific (gomp_tls_key, &initial_thread_tls_data); #endif if (pthread_key_create (&gomp_thread_destructor, gomp_free_thread) != 0) gomp_fatal ("could not create thread pool destructor."); } static void __attribute__((destructor)) team_destructor (void) { /* Without this dlclose on libgomp could lead to subsequent crashes. */ pthread_key_delete (gomp_thread_destructor); } #endif struct gomp_task_icv * gomp_new_icv (void) { struct gomp_thread *thr = gomp_thread (); struct gomp_task *task = gomp_malloc (sizeof (struct gomp_task)); gomp_init_task (task, NULL, &gomp_global_icv); thr->task = task; #ifdef LIBGOMP_USE_PTHREADS pthread_setspecific (gomp_thread_destructor, thr); #endif return &task->icv; }
parallel.c
/* Quicksort parallel */ #include <omp.h> #include "parallel.h" void quickSort_parallel(int* array, int lenArray, int numThreads){ int cutoff = 1000; #pragma omp parallel num_threads(numThreads) { #pragma omp single nowait { quickSort_parallel_internal(array, 0, lenArray - 1, cutoff); } } } void quickSort_parallel_internal(int* array, int left, int right, int cutoff) { int i = left; int j = right; int tmp; int pivot = array[(left + right) / 2]; while (i <= j) { while (array[i] < pivot) { i++; } while (array[j] > pivot) { j--; } if (i <= j) { tmp = array[i]; array[i] = array[j]; array[j] = tmp; i++; j--; } } if((right - left) < cutoff) { if(left < j) { quickSort_parallel_internal(array, left, j, cutoff); } if(i < right) { quickSort_parallel_internal(array, i, right, cutoff); } } else { #pragma omp task { quickSort_parallel_internal(array, left, j, cutoff); } #pragma omp task { quickSort_parallel_internal(array, i, right, cutoff); } } }
sort-openmp.c
#include <stdlib.h>
#include <stdio.h>
#include <time.h>

#define N 50000

double start_time;
double end_time;

/* Insertion-sort the strided subsequence a[0], a[step], a[2*step], ...
   restricted to the first n elements of a.  */
void insertionsort(int a[], int n, int step)
{
    for (int j = step; j < n; j += step) {
        int key = a[j];
        int i = j - step;
        while (i >= 0 && a[i] > key) {
            a[i + step] = a[i];
            i -= step;
        }
        a[i + step] = key;
    }
}

/* Shell sort with gap sequence n/2, n/4, ..., 1.  For each gap m, the
   m interleaved subsequences touch disjoint elements, so they are
   sorted in parallel.  */
void shellsort(int a[], int n)
{
    int i, m;
    for (m = n / 2; m > 0; m /= 2) {
#pragma omp parallel for shared(a,m,n) private(i) default(none)
        for (i = 0; i < m; i++)
            insertionsort(&(a[i]), n - i, m);
    }
}

int main(int argc, char **argv)
{
    const int n = N;
    int *data;
    int missorted = 0;

    data = malloc(n * sizeof *data);    /* was unchecked and cast */
    if (data == NULL) {
        fprintf(stderr, "out of memory\n");
        return EXIT_FAILURE;
    }

    /* BUG FIX: the original called srand((unsigned int)(NULL)), which
       casts a null pointer to an integer and always seeds with 0.
       time(NULL) was clearly intended -- <time.h> is included.  */
    srand((unsigned int)time(NULL));
    for (int i = 0; i < n; i++) {
        data[i] = rand() % n;
    }

    start_time = clock();
    shellsort(data, n);
    end_time = clock();

    /* Verify the result; the original declared 'missorted' but never
       used it.  */
    for (int i = 1; i < n; i++) {
        if (data[i - 1] > data[i])
            missorted++;
    }
    if (missorted > 0)
        fprintf(stderr, "sort failed: %d missorted pairs\n", missorted);

    printf("OpenMP sort time: %f seconds\n",
           (end_time - start_time) / CLOCKS_PER_SEC);

    free(data);                         /* was leaked */
    return missorted == 0 ? 0 : EXIT_FAILURE;
}
GB_unop__cimag_fp64_fc64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__cimag_fp64_fc64
// op(A') function:  GB_unop_tran__cimag_fp64_fc64

// C type:   double
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = (aij)
// unaryop:  cij = cimag (aij)

// A entry type (double complex)
#define GB_ATYPE \
    GxB_FC64_t

// C entry type (the imaginary part, a real double)
#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = cimag (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = (aij) ; \
    Cx [pC] = cimag (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_CIMAG || GxB_NO_FP64 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = cimag (Ax [p]) for all entries (or all set bitmap entries).
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB_unop_apply__cimag_fp64_fc64
(
    double *Cx,         // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every one of the anz entries is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = (aij) ;
            Cx [p] = cimag (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = (aij) ;
            Cx [p] = cimag (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop itself lives in the shared template
// GB_unop_transpose.c, specialized by the macros above.
GrB_Info GB_unop_tran__cimag_fp64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pr88203-1.c
/* PR c++/88203 */
/* { dg-do compile } */
/* { dg-additional-options "-std=c99" { target c } } */
/* { dg-additional-options "-std=c++11" { target c++ } } */

/* Regression test: the predefined __func__ identifier must be accepted
   in OpenMP data-sharing, map, and depend clauses, and must not be
   rejected by default(none).  Compile-only.  */

void foo (const char *);
#pragma omp declare target to (foo)

/* __func__ first referenced inside the construct.  */
void
f1 (void)
{
  #pragma omp parallel default(none)
  foo (__func__);
}

void
f2 (void)
{
  #pragma omp parallel default(none) shared(__func__)
  foo (__func__);
}

void
f3 (void)
{
  #pragma omp parallel default(none) firstprivate(__func__)
  foo (__func__);
}

/* Same as f1-f3, but __func__ already referenced before the construct.  */
void
f4 (void)
{
  foo (__func__);
  #pragma omp parallel default(none)
  foo (__func__);
}

void
f5 (void)
{
  foo (__func__);
  #pragma omp parallel default(none) shared(__func__)
  foo (__func__);
}

void
f6 (void)
{
  foo (__func__);
  #pragma omp parallel default(none) firstprivate(__func__)
  foo (__func__);
}

/* __func__ in map and depend clauses.  */
void
f7 (void)
{
  #pragma omp target map(to: __func__)
  foo (__func__);
  #pragma omp task depend(inout:__func__)
  foo (__func__);
}
2.norace4.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 20 int main() { int A[N][N]; for (int i = 1; i < N; i++) #pragma omp simd for (int j = 1; j < N; j++) A[i - 1][j] += A[i][j] + i + j; } // CHECK: Region is Data Race Free. // END
Instrument.h
/* +-----------------------------------+ | | |***FLOP counting instrumentation***| | | | Copyright (c) -tHE SWINe- 2016 | | | | Instrument.h | | | +-----------------------------------+ */ #pragma once #ifndef __FLOP_COUNTING_SCALAR_INCLUDED #define __FLOP_COUNTING_SCALAR_INCLUDED /** * @file include/sparse_flops/Instrument.h * @brief FLOP counting instrumentation of scalar types * @date 2016 * @author -tHE SWINe- */ #include "slam/Integer.h" #include <math.h> /** * @brief wrapper around numeric type which counts operations * * @tparam CBaseSclar is base scalar type * * @note This does not implement conversion operator to the base type, to avoid errors. * @note The in-place operations (e.g. a +=) with base type on the left are not implemented * for the same reason (would most likely lead to not counting some operations). * @note The counters are implemented using OpenMP atomics so this is thread-safe to a * certain degree. * @note The new math functions introduced in C++11 are not wrapped. 
*/ template <class CBaseSclar> class CFLOPCountingScalar { public: typedef CBaseSclar _TyBase; /**< @brief base type */ typedef void (CFLOPCountingScalar::*Boolean)() const; /**< @brief true value for the safe bool idiom */ typedef size_t _TyCount; /**< @brief instruction counter data type */ protected: _TyBase m_f_value; /**< @brief value */ static _TyCount m_n_add_num; /**< @brief counter for addition operations */ static _TyCount m_n_mul_num; /**< @brief counter for multiplication operations */ static _TyCount m_n_div_num; /**< @brief counter for division operations */ static _TyCount m_n_trcd_num; /**< @brief counter for transcendental operations */ static _TyCount m_n_cmp_num; /**< @brief counter for comparison operations */ public: /** * @brief resets the values of all the counters */ static void Reset_Counters() { #pragma omp atomic m_n_add_num ^= m_n_add_num; #pragma omp atomic m_n_mul_num ^= m_n_mul_num; #pragma omp atomic m_n_div_num ^= m_n_div_num; #pragma omp atomic m_n_trcd_num ^= m_n_trcd_num; #pragma omp atomic m_n_cmp_num ^= m_n_cmp_num; } /** * @brief gets the addition operation counter value * @return Returns the number of addition operations * since the last call to \ref Reset_Counters(). */ static inline _TyCount n_Add_Num() { return m_n_add_num; } /** * @brief gets the multiplication operation counter value * @return Returns the number of multiplication operations * since the last call to \ref Reset_Counters(). */ static inline _TyCount n_Multiply_Num() { return m_n_mul_num; } /** * @brief gets the division operation counter value * @return Returns the number of division operations * since the last call to \ref Reset_Counters(). */ static inline _TyCount n_Divide_Num() { return m_n_div_num; } /** * @brief gets the transcendental operation counter value * @return Returns the number of transcendental operations * since the last call to \ref Reset_Counters(). 
*/ static inline _TyCount n_Transcendental_Num() { return m_n_trcd_num; } /** * @brief gets the comparison operation counter value * @return Returns the number of comparison operations * since the last call to \ref Reset_Counters(). */ static inline _TyCount n_Comparison_Num() { return m_n_cmp_num; } /** * @brief gets the sum of all operation counter values * @return Returns the number of (all types of) operations * since the last call to \ref Reset_Counters(). * @note This sum is equally weighted. */ static inline _TyCount n_FLOP_Num() { return m_n_add_num + m_n_mul_num + m_n_div_num + m_n_trcd_num + m_n_cmp_num; } /** * @brief default constructor; has no effect */ CFLOPCountingScalar() {} /** * @brief constructor; initializes the value * @param[in] f_value is value to initialize to */ CFLOPCountingScalar(_TyBase f_value) :m_f_value(f_value) {} /** * @brief constructor; initializes to value obtained by a transcendental operation(s) * * @param[in] f_value is value to initialize to * @param[in] n_transcendent_operation_num is number of * transcendent operations it took to obtain the value */ CFLOPCountingScalar(_TyBase f_value, _TyCount n_transcendent_operation_num) :m_f_value(f_value) { #pragma omp atomic m_n_trcd_num += n_transcendent_operation_num; } /** * @brief gets the value * @return Returns the stored value. * @note Automatic conversion operator to \ref _TyBase is not * implemented as it would make the debugging much harder. */ _TyBase f_Value() const { return m_f_value; } /** * @brief gets the value * @return Returns a reference the stored value. * @note Automatic conversion operator to \ref _TyBase is not * implemented as it would make the debugging much harder. */ _TyBase &f_Value() { return m_f_value; } /** * @brief unary minus operator * @return Returns the negative value of this. * @note This increments the addition counter. 
*/ CFLOPCountingScalar operator -() const { #pragma omp atomic ++ m_n_add_num; return CFLOPCountingScalar(-m_f_value); } /** * @brief addition operator * @param[in] f_x is the value on the right side * @return Returns the sum of the two values. * @note This increments the addition counter. */ CFLOPCountingScalar operator +(_TyBase f_x) const { #pragma omp atomic ++ m_n_add_num; return CFLOPCountingScalar(m_f_value + f_x); } /** * @brief subtraction operator * @param[in] f_x is the value on the right side * @return Returns the difference of the two values. * @note This increments the addition counter. */ CFLOPCountingScalar operator -(_TyBase f_x) const { #pragma omp atomic ++ m_n_add_num; return CFLOPCountingScalar(m_f_value - f_x); } /** * @brief multiplication operator * @param[in] f_x is the value on the right side * @return Returns the product of the two values. * @note This increments the multiplication counter. */ CFLOPCountingScalar operator *(_TyBase f_x) const { #pragma omp atomic ++ m_n_mul_num; return CFLOPCountingScalar(m_f_value * f_x); } /** * @brief division operator * @param[in] f_x is the value on the right side * @return Returns the ratio of the two values. * @note This increments the division counter. */ CFLOPCountingScalar operator /(_TyBase f_x) const { #pragma omp atomic ++ m_n_div_num; return CFLOPCountingScalar(m_f_value / f_x); } /** * @brief inplace addition operator * @param[in] f_x is the value on the right side * @return Returns reference to this. * @note This increments the addition counter. */ CFLOPCountingScalar &operator +=(_TyBase f_x) { #pragma omp atomic ++ m_n_add_num; m_f_value += f_x; return *this; } /** * @brief inplace subtraction operator * @param[in] f_x is the value on the right side * @return Returns reference to this. * @note This increments the addition counter. 
*/ CFLOPCountingScalar &operator -=(_TyBase f_x) { #pragma omp atomic ++ m_n_add_num; m_f_value -= f_x; return *this; } /** * @brief inplace multiplication operator * @param[in] f_x is the value on the right side * @return Returns reference to this. * @note This increments the multiplication counter. */ CFLOPCountingScalar &operator *=(_TyBase f_x) { #pragma omp atomic ++ m_n_mul_num; m_f_value *= f_x; return *this; } /** * @brief inplace division operator * @param[in] f_x is the value on the right side * @return Returns reference to this. * @note This increments the division counter. */ CFLOPCountingScalar &operator /=(_TyBase f_x) { #pragma omp atomic ++ m_n_div_num; m_f_value /= f_x; return *this; } /** * @brief addition operator * @param[in] f_x is the value on the right side * @return Returns the sum of the two values. * @note This increments the addition counter. */ CFLOPCountingScalar operator +(CFLOPCountingScalar f_x) const { #pragma omp atomic ++ m_n_add_num; return CFLOPCountingScalar(m_f_value + f_x.m_f_value); } /** * @brief subtraction operator * @param[in] f_x is the value on the right side * @return Returns the difference of the two values. * @note This increments the addition counter. */ CFLOPCountingScalar operator -(CFLOPCountingScalar f_x) const { #pragma omp atomic ++ m_n_add_num; return CFLOPCountingScalar(m_f_value - f_x.m_f_value); } /** * @brief multiplication operator * @param[in] f_x is the value on the right side * @return Returns the product of the two values. * @note This increments the multiplication counter. */ CFLOPCountingScalar operator *(CFLOPCountingScalar f_x) const { #pragma omp atomic ++ m_n_mul_num; return CFLOPCountingScalar(m_f_value * f_x.m_f_value); } /** * @brief division operator * @param[in] f_x is the value on the right side * @return Returns the ratio of the two values. * @note This increments the division counter. 
*/ CFLOPCountingScalar operator /(CFLOPCountingScalar f_x) const { #pragma omp atomic ++ m_n_div_num; return CFLOPCountingScalar(m_f_value / f_x.m_f_value); } /** * @brief inplace addition operator * @param[in] f_x is the value on the right side * @return Returns reference to this. * @note This increments the addition counter. */ CFLOPCountingScalar &operator +=(CFLOPCountingScalar f_x) { #pragma omp atomic ++ m_n_add_num; m_f_value += f_x.m_f_value; return *this; } /** * @brief inplace subtraction operator * @param[in] f_x is the value on the right side * @return Returns reference to this. * @note This increments the addition counter. */ CFLOPCountingScalar &operator -=(CFLOPCountingScalar f_x) { #pragma omp atomic ++ m_n_add_num; m_f_value -= f_x.m_f_value; return *this; } /** * @brief inplace multiplication operator * @param[in] f_x is the value on the right side * @return Returns reference to this. * @note This increments the multiplication counter. */ CFLOPCountingScalar &operator *=(CFLOPCountingScalar f_x) { #pragma omp atomic ++ m_n_mul_num; m_f_value *= f_x.m_f_value; return *this; } /** * @brief inplace division operator * @param[in] f_x is the value on the right side * @return Returns reference to this. * @note This increments the division counter. */ CFLOPCountingScalar &operator /=(CFLOPCountingScalar f_x) { #pragma omp atomic ++ m_n_div_num; m_f_value /= f_x.m_f_value; return *this; } /** * @brief unary negation operator * @return Returns true if this equals zero, otherwise returns false. * @note This increments the comparison counter. */ bool operator !() const { #pragma omp atomic ++ m_n_cmp_num; return !m_f_value; } /** * @brief conversion to bool * * @return Returns nonzero (not 1) if this does not equal to zero, otherwise returns null. * * @note This uses the safe bool idiom to avoid mixing expansions in unsafe arithmetic expressions. * @note This increments the comparison counter. 
*/ operator Boolean() const { #pragma omp atomic ++ m_n_cmp_num; return (m_f_value)? &CFLOPCountingScalar::True_Value : 0; } /** * @brief less-than operator * @param[in] f_x is the value on the right side * @return Returns true if this equals zero, otherwise returns false. * @note This increments the comparison counter. */ bool operator <(CFLOPCountingScalar f_x) { #pragma omp atomic ++ m_n_cmp_num; return m_f_value < f_x.m_f_value; } /** * @brief greater-than operator * @param[in] f_x is the value on the right side * @return Returns true if this is greater than \ref f_x, otherwise returns false. * @note This increments the comparison counter. */ bool operator >(CFLOPCountingScalar f_x) { return CFLOPCountingScalar(f_x) < m_f_value; } /** * @brief equal-to operator * @param[in] f_x is the value on the right side * @return Returns true if this is equal to \ref f_x, otherwise returns false. * @note This increments the comparison counter. */ bool operator ==(CFLOPCountingScalar f_x) { #pragma omp atomic ++ m_n_cmp_num; return m_f_value == f_x.m_f_value; } /** * @brief not-equal-to operator * @param[in] f_x is the value on the right side * @return Returns true if this is not equal to \ref f_x, otherwise returns false. * @note This increments the comparison counter. */ bool operator !=(CFLOPCountingScalar f_x) { return !(*this == f_x); } /** * @brief less-than or equal operator * @param[in] f_x is the value on the right side * @return Returns true if this is less than or equal to \ref f_x, otherwise returns false. * @note This increments the comparison counter. */ bool operator <=(CFLOPCountingScalar f_x) { return !(*this > f_x); } /** * @brief greater-than or equal operator * @param[in] f_x is the value on the right side * @return Returns true if this is greater than or equal to \ref f_x, otherwise returns false. * @note This increments the comparison counter. 
*/ bool operator >=(CFLOPCountingScalar f_x) { return !(*this < f_x); } /** * @brief less-than operator * @param[in] f_x is the value on the right side * @return Returns true if this equals zero, otherwise returns false. * @note This increments the comparison counter. */ bool operator <(_TyBase f_x) { #pragma omp atomic ++ m_n_cmp_num; return m_f_value < f_x; } /** * @brief greater-than operator * @param[in] f_x is the value on the right side * @return Returns true if this is greater than \ref f_x, otherwise returns false. * @note This increments the comparison counter. */ bool operator >(_TyBase f_x) { return CFLOPCountingScalar(f_x) < m_f_value; } /** * @brief equal-to operator * @param[in] f_x is the value on the right side * @return Returns true if this is equal to \ref f_x, otherwise returns false. * @note This increments the comparison counter. */ bool operator ==(_TyBase f_x) { #pragma omp atomic ++ m_n_cmp_num; return m_f_value == f_x; } /** * @brief not-equal-to operator * @param[in] f_x is the value on the right side * @return Returns true if this is not equal to \ref f_x, otherwise returns false. * @note This increments the comparison counter. */ bool operator !=(_TyBase f_x) { return !(*this == f_x); } /** * @brief less-than or equal operator * @param[in] f_x is the value on the right side * @return Returns true if this is less than or equal to \ref f_x, otherwise returns false. * @note This increments the comparison counter. */ bool operator <=(_TyBase f_x) { return !(*this > f_x); } /** * @brief greater-than or equal operator * @param[in] f_x is the value on the right side * @return Returns true if this is greater than or equal to \ref f_x, otherwise returns false. * @note This increments the comparison counter. 
*/ bool operator >=(_TyBase f_x) { return !(*this < f_x); } protected: /** * @brief value of true for the safe bool idiom */ void True_Value() const {} }; /** * @brief addition operator * * @tparam CBaseSclar is a scalar type * * @param[in] f_x is the value on the left side * @param[in] f_y is the value on the right side * * @return Returns the sum of the two values. * * @note This increments the addition counter. */ template <class CBaseSclar> inline CFLOPCountingScalar<CBaseSclar> operator +(CBaseSclar f_x, CFLOPCountingScalar<CBaseSclar> f_y) { return CFLOPCountingScalar<CBaseSclar>(f_x) + f_y; } /** * @brief subtraction operator * * @tparam CBaseSclar is a scalar type * * @param[in] f_x is the value on the left side * @param[in] f_y is the value on the right side * * @return Returns the difference of the two values. * * @note This increments the addition counter. */ template <class CBaseSclar> inline CFLOPCountingScalar<CBaseSclar> operator -(CBaseSclar f_x, CFLOPCountingScalar<CBaseSclar> f_y) { return CFLOPCountingScalar<CBaseSclar>(f_x) - f_y; } /** * @brief multiplication operator * * @tparam CBaseSclar is a scalar type * * @param[in] f_x is the value on the left side * @param[in] f_y is the value on the right side * * @return Returns the product of the two values. * * @note This increments the multiplication counter. */ template <class CBaseSclar> inline CFLOPCountingScalar<CBaseSclar> operator *(CBaseSclar f_x, CFLOPCountingScalar<CBaseSclar> f_y) { return CFLOPCountingScalar<CBaseSclar>(f_x) * f_y; } /** * @brief division operator * * @tparam CBaseSclar is a scalar type * * @param[in] f_x is the value on the left side * @param[in] f_y is the value on the right side * * @return Returns the ratio of the two values. * * @note This increments the division counter. 
*/ template <class CBaseSclar> inline CFLOPCountingScalar<CBaseSclar> operator /(CBaseSclar f_x, CFLOPCountingScalar<CBaseSclar> f_y) { return CFLOPCountingScalar<CBaseSclar>(f_x) / f_y; } /** * @brief greater-than operator * * @tparam CBaseSclar is a scalar type * * @param[in] f_x is the value on the left side * @param[in] f_y is the value on the right side * * @return Returns true if this is greater than \ref f_x, otherwise returns false. * @note This increments the comparison counter. */ template <class CBaseSclar> inline bool operator >(CBaseSclar f_x, CFLOPCountingScalar<CBaseSclar> f_y) { return f_y < f_x; } /** * @brief equal-to operator * * @tparam CBaseSclar is a scalar type * * @param[in] f_x is the value on the left side * @param[in] f_y is the value on the right side * * @return Returns true if this is equal to \ref f_x, otherwise returns false. * @note This increments the comparison counter. */ template <class CBaseSclar> inline bool operator ==(CBaseSclar f_x, CFLOPCountingScalar<CBaseSclar> f_y) { return f_y == f_x; } /** * @brief not-equal-to operator * * @tparam CBaseSclar is a scalar type * * @param[in] f_x is the value on the left side * @param[in] f_y is the value on the right side * * @return Returns true if this is not equal to \ref f_x, otherwise returns false. * @note This increments the comparison counter. */ template <class CBaseSclar> inline bool operator !=(CBaseSclar f_x, CFLOPCountingScalar<CBaseSclar> f_y) { return f_y != f_x; } /** * @brief less-than or equal operator * * @tparam CBaseSclar is a scalar type * * @param[in] f_x is the value on the left side * @param[in] f_y is the value on the right side * * @return Returns true if this is less than or equal to \ref f_x, otherwise returns false. * @note This increments the comparison counter. 
*/
template <class CBaseSclar>
inline bool operator <=(CBaseSclar f_x, CFLOPCountingScalar<CBaseSclar> f_y)
{
	return f_y >= f_x;
}

/**
 *	@brief greater-than or equal operator
 *
 *	@tparam CBaseSclar is a scalar type
 *
 *	@param[in] f_x is the value on the left side
 *	@param[in] f_y is the value on the right side
 *
 *	@return Returns true if \ref f_x is greater than or equal to \ref f_y, otherwise returns false.
 *	@note This increments the comparison counter (via the member operator it delegates to).
 */
template <class CBaseSclar>
inline bool operator >=(CBaseSclar f_x, CFLOPCountingScalar<CBaseSclar> f_y)
{
	return f_y <= f_x;
}

// definitions of the static operation counters; one set per template
// specialization, zero-initialized, shared by all instances
template <class CBaseSclar>
typename CFLOPCountingScalar<CBaseSclar>::_TyCount CFLOPCountingScalar<CBaseSclar>::m_n_add_num = 0;
template <class CBaseSclar>
typename CFLOPCountingScalar<CBaseSclar>::_TyCount CFLOPCountingScalar<CBaseSclar>::m_n_mul_num = 0;
template <class CBaseSclar>
typename CFLOPCountingScalar<CBaseSclar>::_TyCount CFLOPCountingScalar<CBaseSclar>::m_n_div_num = 0;
template <class CBaseSclar>
typename CFLOPCountingScalar<CBaseSclar>::_TyCount CFLOPCountingScalar<CBaseSclar>::m_n_trcd_num = 0;
template <class CBaseSclar>
typename CFLOPCountingScalar<CBaseSclar>::_TyCount CFLOPCountingScalar<CBaseSclar>::m_n_cmp_num = 0;
// values of the counters

/**
 *	@brief (integer) absolute value function
 *	@tparam CBaseSclar is a scalar type
 *	@param[in] f_x is value to take absolute value of
 *	@return Returns absolute value of \ref f_x.
 *	@note This truncates the stored value to int before forwarding to ::abs(int),
 *		and charges the call as one transcendental operation (which is perhaps
 *		not entirely correct).
 */
template <class CBaseSclar>
CFLOPCountingScalar<CBaseSclar> abs(CFLOPCountingScalar<CBaseSclar> f_x)
{
	return CFLOPCountingScalar<CBaseSclar>((CBaseSclar)abs(int(f_x.f_Value())), 1);
}

/**
 * @brief (integer) absolute value function
 * @tparam CBaseSclar is a scalar type
 * @param[in] f_x is value to take absolute value of
 * @return Returns absolute value of \ref f_x.
* @note This increments (perhaps not entirely correctly) the transcendental operation counter. */ template <class CBaseSclar> CFLOPCountingScalar<CBaseSclar> labs(CFLOPCountingScalar<CBaseSclar> f_x) { return CFLOPCountingScalar<CBaseSclar>((CBaseSclar)labs(long(f_x.f_Value())), 1); } /** * @def DECLARE_UNARY_TRANSCENDENTAL_OP * @brief declares unary transcendental operation * @tparam opname is operation name (e.g. sin) */ #define DECLARE_UNARY_TRANSCENDENTAL_OP(opname) \ template <class CBaseSclar> \ CFLOPCountingScalar<CBaseSclar> opname(CFLOPCountingScalar<CBaseSclar> f_x) \ { \ return CFLOPCountingScalar<CBaseSclar>((CBaseSclar)opname(double(f_x.f_Value())), 1); /* did one transcendental op */ \ } /** * @def DECLARE_BINARY_TRANSCENDENTAL_OP * @brief declares binary transcendental operation * @tparam opname is operation name (e.g. atan2) */ #define DECLARE_BINARY_TRANSCENDENTAL_OP(opname) \ template <class CBaseSclar> \ CFLOPCountingScalar<CBaseSclar> opname(CFLOPCountingScalar<CBaseSclar> f_x, CFLOPCountingScalar<CBaseSclar> f_y) \ { \ return CFLOPCountingScalar<CBaseSclar>((CBaseSclar)opname(double(f_x.f_Value()), double(f_y.f_Value())), 1); /* did one transcendental op */ \ } /** * @brief absolute value function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value to take absolute value of * @return Returns absolute value of \ref f_x. * @note This increments (perhaps not entirely correctly) the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(fabs) /** * @brief arc-sine function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument * @return Returns arc-sine of \ref f_x, expressed in radians. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(asin) /** * @brief arc-cosine function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument * @return Returns arc-cosine of \ref f_x, expressed in radians. 
* @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(acos) /** * @brief arc-tangent function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument * @return Returns arc-tangent of \ref f_x, expressed in radians. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(atan) /** * @brief binary arc-tangent function * * @tparam CBaseSclar is a scalar type * * @param[in] f_x is value of the first input argument * @param[in] f_y is value of the second input argument * * @return Returns arc-tangent of \ref f_x / \ref f_y, expressed in radians. * * @note This increments the transcendental operation counter. */ DECLARE_BINARY_TRANSCENDENTAL_OP(atan2) /** * @brief cosine function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument, expressed in radians * @return Returns cosine of \ref f_x. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(cos) /** * @brief hyperbolic cosine function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument, expressed in radians * @return Returns hyperbolic cosine of \ref f_x. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(cosh) /** * @brief base-e exponential function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument * @return Returns natural exponent of \ref f_x. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(exp) /** * @brief floating-point modulo function * * @tparam CBaseSclar is a scalar type * * @param[in] f_x is value of the numerator * @param[in] f_y is value of the denominator * * @return Returns modulo of \ref f_x / \ref f_y. * * @note This increments the transcendental operation counter. 
*/ DECLARE_BINARY_TRANSCENDENTAL_OP(fmod) /** * @brief base-e logarithm function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument * @return Returns natural logarithm of \ref f_x. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(log) /** * @brief base-10 logarithm function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument * @return Returns base-10 logarithm of \ref f_x. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(log10) /** * @brief raise-to-power function * * @tparam CBaseSclar is a scalar type * * @param[in] f_x is value of the base * @param[in] f_y is value of the exponent * * @return Returns \ref f_x to the power of \ref f_y. * * @note This increments the transcendental operation counter. */ DECLARE_BINARY_TRANSCENDENTAL_OP(pow) /** * @brief sine function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument, expressed in radians * @return Returns sine of \ref f_x. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(sin) /** * @brief hyperbolic sine function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument, expressed in radians * @return Returns hyperbolic sine of \ref f_x. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(sinh) /** * @brief tangent function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument, expressed in radians * @return Returns tangent of \ref f_x. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(tan) /** * @brief hyperbolic tangent function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument, expressed in radians * @return Returns hyperbolic tangent of \ref f_x. 
* @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(tanh) /** * @brief square root function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument * @return Returns square root of \ref f_x. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(sqrt) /** * @brief round up function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument * @return Returns the closest integer greater than or equal to \ref f_x. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(ceil) /** * @brief round down function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument * @return Returns the closest integer smaller than or equal to \ref f_x. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(floor) typedef CFLOPCountingScalar<float> CFLOPCountingFloat; /**< @brief FLOP-counting float */ typedef CFLOPCountingScalar<double> CFLOPCountingDouble; /**< @brief FLOP-counting double */ /** * @page countingflops Counting FLOPs in Sparse Operations * * This example shows how to make use of \ref CFLOPCountingDouble and \ref CTSparse * to count floating point operations (FLOPs) in arbitrary sparse operations. We begin * by including the two necessary files: * * @code * #include "sparse_flops/cts.hpp" * #include "sparse_flops/Instrument.h" * @endcode * * Now it is possible to typedef a flavor of templated CSparse which will count FLOPs: * * @code * typedef CTSparse<CFLOPCountingDouble> CFLOPCountingSparse; * @endcode * * A nice thing about this is that \ref CFLOPCountingDouble is a thin wrapper around * `double` and pointers to the two can be converted (so called type punning). 
This also * allows us to convert between ordinary sparse matrices \ref cs and `CFLOPCountingSparse`: * * @code * cs *A = cs_spalloc(...); * CFLOPCountingSparse *A_instrumented = CFLOPCountingSparse::p_FromSparse(A); // there * * CFLOPCountingSparse *B_instrumented = CFLOPCountingSparse::spalloc(...); * cs *B = CFLOPCountingSparse::p_ToSparse(B_instrumented); // and back * @endcode * * Here, the functions \ref CTSparse::p_FromSparse() and \ref CTSparse::p_ToSparse() * perform the necessary checks to make sure that the conversion is safe, otherwise * such code fails to compile. Note that the pointers `A` and `A_instrumented` * point to the same memory location (equally as `B` and `B_instrumented` do) and * no new memory is allocated. * * Now suppose we want to count the real number of FLOPs in a sparse Cholesky * factorization: * * @code * size_t n_Chol_FLOP_Num(const cs *A, int order = CFLOPCountingSparse::order_AMD_Chol) * { * CFLOPCountingSparse::css *S = CFLOPCountingSparse::schol(order, * CFLOPCountingSparse::p_FromSparse(A)); // calls AMD * * size_t n_before_chol = CFLOPCountingDouble::n_FLOP_Num(); * * CFLOPCountingSparse::csn *N = CFLOPCountingSparse::chol(CFLOPCountingSparse::p_FromSparse(A), S); * * size_t n_flops = CFLOPCountingDouble::n_FLOP_Num() - n_before_chol; * * CFLOPCountingSparse::sfree(S); * CFLOPCountingSparse::nfree(N); * * return n_flops; * } * @endcode * * The first line performs symbolic factorization using \ref CTSparse::schol(). Then, * the counters of floating point operations are sampled, using \ref CFLOPCountingScalar::n_FLOP_Num() * and the numeric Cholesky factorization is performed using \ref CTSparse::chol(). * After that, the difference in the number of FLOPs is taken. Alternatively, one can call * \ref CFLOPCountingScalar::Reset_Counters() before and then directly read out the number of operations * using \ref CFLOPCountingScalar::n_FLOP_Num(). 
* * This has the advantage that it actually calculates the factorization in the process, so it is * fairly easy to instrument existing code this way and it is possible to count FLOPs in iterative * code where the stopping condition depends on the computed values. * * The full code of the example follows: * * @code * #include "sparse_flops/cts.hpp" * #include "sparse_flops/Instrument.h" * * typedef CTSparse<CFLOPCountingDouble> CFLOPCountingSparse; * * cs *p_AllocFull(csi m, csi n, double f_value = 1.0) * { * if(n && m > LONG_MAX / n) * return 0; // would overflow below * cs *p_matrix = cs_spalloc(m, n, m * n, 1, 0); * csi n_off = 0; * for(csi i = 0; i < n; ++ i) { * p_matrix->p[i] = n_off; * for(csi j = 0; j < m; ++ j, ++ n_off) { * p_matrix->i[n_off] = j; * p_matrix->x[n_off] = f_value; * } * } * p_matrix->p[n] = n_off; * return p_matrix; * } * * cs *p_AllocLower(csi m, csi n, double f_value = 1.0) * { * if(n && m > LONG_MAX / n) * return 0; // would overflow below * size_t n_nnz = std::min(m, n) * (std::min(m, n) - 1) / 2 + std::min(m, n) + // the square triangular section * (m - std::min(m, n)) * n; // the bottom side if the matrix is narrow (completely filled) * cs *p_matrix = cs_spalloc(m, n, n_nnz, 1, 0); * csi n_off = 0; * for(csi i = 0; i < n; ++ i) { * p_matrix->p[i] = n_off; * for(csi j = i; j < m; ++ j, ++ n_off) { * p_matrix->i[n_off] = j; * p_matrix->x[n_off] = f_value; * } * } * p_matrix->p[n] = n_off; * _ASSERTE(n_off == n_nnz); * return p_matrix; * } * * size_t n_GEMM_FLOP_Num(const cs *A, const cs *B) * { * size_t n_before = CFLOPCountingDouble::n_FLOP_Num(); * * cs *p_result = CFLOPCountingSparse::p_ToSparse(CFLOPCountingSparse::multiply( * CFLOPCountingSparse::p_FromSparse(A), CFLOPCountingSparse::p_FromSparse(B))); * cs_spfree(p_result); * * return CFLOPCountingDouble::n_FLOP_Num() - n_before; * } * * size_t n_GAXPY_FLOP_Num(const cs *A, const double *x, double *y) * { * size_t n_before = CFLOPCountingDouble::n_FLOP_Num(); * * 
CFLOPCountingSparse::gaxpy(CFLOPCountingSparse::p_FromSparse(A), * (CFLOPCountingDouble*)x, (CFLOPCountingDouble*)y); * * return CFLOPCountingDouble::n_FLOP_Num() - n_before; * } * * size_t n_TRSV_FLOP_Num(const cs *L, double *x) * { * size_t n_before = CFLOPCountingDouble::n_FLOP_Num(); * * CFLOPCountingSparse::lsolve(CFLOPCountingSparse::p_FromSparse(L), (CFLOPCountingDouble*)x); * * return CFLOPCountingDouble::n_FLOP_Num() - n_before; * } * * size_t n_Chol_FLOP_Num(const cs *A, int order = CFLOPCountingSparse::order_AMD_Chol) * { * CFLOPCountingSparse::css *S = CFLOPCountingSparse::schol(order, * CFLOPCountingSparse::p_FromSparse(A)); // calls AMD * * size_t n_before_chol = CFLOPCountingDouble::n_FLOP_Num(); * * CFLOPCountingSparse::csn *N = CFLOPCountingSparse::chol(CFLOPCountingSparse::p_FromSparse(A), S); * * size_t n_flops = CFLOPCountingDouble::n_FLOP_Num() - n_before_chol; * * CFLOPCountingSparse::sfree(S); * CFLOPCountingSparse::nfree(N); * * return n_flops; * } * * size_t n_LU_FLOP_Num(const cs *A, int order = CFLOPCountingSparse::order_AMD_LU) * { * CFLOPCountingSparse::css *S = CFLOPCountingSparse::sqr(order, * CFLOPCountingSparse::p_FromSparse(A), 0); // calls AMD * * size_t n_before_chol = CFLOPCountingDouble::n_FLOP_Num(); * * CFLOPCountingSparse::csn *N = CFLOPCountingSparse::lu(CFLOPCountingSparse::p_FromSparse(A), S, 1e-3); * * size_t n_flops = CFLOPCountingDouble::n_FLOP_Num() - n_before_chol; * * CFLOPCountingSparse::sfree(S); * CFLOPCountingSparse::nfree(N); * * return n_flops; * } * * void Test_SparseOpsCost() * { * cs *A = p_AllocFull(100, 100); * * printf("counting FLOPs in GEMM of two 100 x 100 matrices\n"); * size_t n_GEMM_cost = n_GEMM_FLOP_Num(A, A); * size_t n_GEMM_cost_GT = 100 * 100 * (100 * 2 - 1); // the leading addition is saved * printf("\tground truth FLOPs: " PRIsize "\n", n_GEMM_cost_GT); * printf("\trecorded FLOPs: " PRIsize " (%s)\n", n_GEMM_cost, * (n_GEMM_cost == n_GEMM_cost_GT)? 
"pass" : "FAIL"); * * printf("\ncounting FLOPs in GAXPY of a 100 x 100 matrix and a 100 x 1 vector\n"); * double x[100] = {0}, y[100] = {0}; * size_t n_GAXPY_cost = n_GAXPY_FLOP_Num(A, x, y); * size_t n_GAXPY_cost_GT = 100 * 100 * 2; * printf("\tground truth FLOPs: " PRIsize "\n", n_GAXPY_cost_GT); * printf("\trecorded FLOPs: " PRIsize " (%s)\n", n_GAXPY_cost, * (n_GAXPY_cost == n_GAXPY_cost_GT)? "pass" : "FAIL"); * * for(int i = 0; i < 100; ++ i) * A->x[i * 100 + i] = 10.0; * // make the diagonal a bit larger in order for the matrix to be positive definite * * printf("\ncounting FLOPs in Cholesky of a 100 x 100 matrix\n"); * size_t n_Chol_cost = n_Chol_FLOP_Num(A, CFLOPCountingSparse::order_Natural); * size_t n_Chol_cost_GT = 100 * 100 * 100 / 3 + (100 * (100 - 1)) / 2 + 100; // O(n^3/3 + nnz) * printf("\tground truth FLOPs: " PRIsize "\n", n_Chol_cost_GT); * printf("\trecorded FLOPs: " PRIsize " (%s)\n", n_Chol_cost, * (n_Chol_cost == n_Chol_cost_GT)? "pass" : * (fabs(double(n_Chol_cost - n_Chol_cost_GT) / n_Chol_cost_GT) < 1e-3)? * "pass within 0.1 %" : "FAIL"); // up to 0.1% discrepancy allowed * * cs *L = p_AllocLower(100, 100); * // get a triangular matrix * * printf("\ncounting FLOPs in TRSV of a 100 x 100 lower-triangular matrix and a 100 x 1 vector\n"); * size_t n_TRSV_cost = n_TRSV_FLOP_Num(L, x); * size_t n_TRSV_cost_GT = 100 * 100 / 2 * 2; * printf("\tground truth FLOPs: " PRIsize "\n", n_TRSV_cost_GT); * printf("\trecorded FLOPs: " PRIsize " (%s)\n", n_TRSV_cost, * (n_TRSV_cost == n_TRSV_cost_GT)? "pass" : "FAIL"); * * cs_spfree(A); * cs_spfree(L); * } * @endcode * * Note that here, FLOPs is a plural of FLOP. It does not refer to floating point * operations per second (FLOPS with capital `S'). * */ #endif // !__FLOP_COUNTING_SCALAR_INCLUDED
parallel_macros.h
// ========================================================================== // SeqAn - The Library for Sequence Analysis // ========================================================================== // Copyright (c) 2006-2013, Knut Reinert, FU Berlin // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of Knut Reinert or the FU Berlin nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
// // ========================================================================== // Author: Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de> // ========================================================================== // Utility macros for parallelism. // ========================================================================== #ifndef SEQAN_PARALLEL_PARALLEL_MACROS_H_ #define SEQAN_PARALLEL_PARALLEL_MACROS_H_ /*! * @macro SEQAN_OMP_PRAGMA * @brief Portable conditional <tt>#pragma</tt> issuing if OpenMP is enabled. * * @signature SEQAN_OMP_PRAGMA(x) * * @param x The string to issue behind <tt>#pragma omp</tt>. * * @section Remarks * * This macro uses portable pragma generation, dependent on the macro <tt>_OPENMP</tt> being defined (as by * the OpenMP standard). * * This is useful for disabling OpenMP pragmas on compilers that do not support OpenMP or when OpenMP is not enabled to * suppress warnings. * * @section Example * * Parallelize loop with OpenMP if OpenMP is enabled: * * @code{.cpp} * SEQAN_OMP_PRAGMA(parallel for) // becomes: #pragma omp parallel for * for (int i = 0; i < x; ++i) * { * // Do work. * } * @endcode * * Make an addition atomic if OpenMP is enabled: * * @code{.cpp} * SEQAN_OMP_PRAGMA(parallel atomic) // becomes: #pragma omp parallel atomic * i += 1; * @endcode */ /** .Macro.SEQAN_OMP_PRAGMA ..summary:Portable conditional $#pragma$ issuing if OpenMP is enabled. ..cat:Parallelism ..signature:SEQAN_OMP_PRAGMA(x) ..param.x:The string to issue behind $#pragma omp$. ..remarks:This macro uses portable pragma generation, dependent on the macro $_OPENMP$ being defined (as by the OpenMP standard). ..remarks:This is useful for disabling OpenMP pragmas on compilers that do not support OpenMP to suppress warnings. ..example.text:Parallelize loop with OpenMP if OpenMP is enabled: ..example.code: SEQAN_OMP_PRAGMA(parallel for) // becomes: #pragma omp parallel for for (int i = 0; i < x; ++i) { // Do work. 
}
..example.text:Make an addition atomic if OpenMP is enabled:
..example.code:
SEQAN_OMP_PRAGMA(parallel atomic) // becomes: #pragma omp parallel atomic
i += 1;
*/

// NOTE(review): the example above expands to "#pragma omp parallel atomic",
// which is not a valid OpenMP combined construct; the intended usage is
// presumably SEQAN_OMP_PRAGMA(atomic) -- confirm against the OpenMP spec.

#ifdef _OPENMP

#include <omp.h>

// OpenMP enabled: SEQAN_OMP_PRAGMA(x) emits a real "#pragma omp x".
#if defined(PLATFORM_WINDOWS_MINGW) || defined(PLATFORM_GCC)
// GCC _Pragma operator (operates on a string literal, hence the #x stringification).
#define SEQAN_DO_PRAGMA(x) _Pragma(#x)
#define SEQAN_OMP_PRAGMA(x) SEQAN_DO_PRAGMA(omp x)
#else  // #if defined(PLATFORM_WINDOWS_MINGW) || defined(PLATFORM_GCC)
// MSVC __pragma-operator (takes an unquoted token sequence).
#define SEQAN_OMP_PRAGMA(x) __pragma (omp x)
#endif  // #if defined(PLATFORM_WINDOWS_MINGW) || defined(PLATFORM_GCC)

#else  // #ifdef _OPENMP

// OpenMP disabled: the pragma disappears entirely, so annotated code compiles
// serially without warnings.
#define SEQAN_OMP_PRAGMA(x)

// low-level OpenMP runtime compatibility: single-threaded stand-ins so callers
// may use these runtime queries unconditionally, without <omp.h>.
inline void omp_set_num_threads(int) {}          // no-op: only one thread exists
inline int omp_get_num_threads() { return 1; }   // team size is always 1
inline int omp_get_max_threads() { return 1; }
inline int omp_get_thread_num() { return 0; }    // the single thread has id 0

#endif  // #ifdef _OPENMP

#endif  // SEQAN_PARALLEL_PARALLEL_MACROS_H_
ecryptfs_fmt_plug.c
/* Cracker for eCryptfs ~/.ecryptfs/wrapped-passphrase. * * We attack "login passphrase" instead of "mount passphrase" (and which could * be 128-bit random key!). * * "ecryptfs_unwrap_passphrase -> generate_passphrase_sig" in * src/libecryptfs/key_management.c is important. * * Do we need to do full decryption as done in "ecryptfs_unwrap_passphrase"? * I believe, 8 bytes of verification data ought to be enough for anybody! * * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> * and it is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_ecryptfs1; #elif FMT_REGISTERS_H john_register_one(&fmt_ecryptfs1); #else #include <string.h> #include <errno.h> #include "sha2.h" #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "base64_convert.h" #include "johnswap.h" #include "simd-intrinsics.h" #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 8 // XXX #endif #endif #include "memdbg.h" //#undef SIMD_COEF_64 #define FORMAT_TAG "$ecryptfs$" #define FORMAT_TAG_LENGTH (sizeof(FORMAT_TAG) - 1) #define FORMAT_LABEL "eCryptfs" #define FORMAT_NAME "" #define ALGORITHM_NAME "SHA512 " SHA512_ALGORITHM_NAME #define BENCHMARK_COMMENT " (65536x)" // good luck with that! 
#define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define REAL_BINARY_SIZE 8 #define HEX_BINARY_SIZE (REAL_BINARY_SIZE*2) #define BINARY_SIZE 64 #define BINARY_ALIGN 4 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN 4 #ifdef SIMD_COEF_64 #define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512) #define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512) #define GETPOS_512(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i)&7)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64 *8 ) #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif /* taken from eCryptfs */ #define ECRYPTFS_DEFAULT_NUM_HASH_ITERATIONS 65536 #define ECRYPTFS_MAX_PASSWORD_LENGTH 64 #define ECRYPTFS_MAX_PASSPHRASE_BYTES ECRYPTFS_MAX_PASSWORD_LENGTH #define ECRYPTFS_SALT_SIZE 8 #define ECRYPTFS_SALT_SIZE_HEX (ECRYPTFS_SALT_SIZE*2) #define ECRYPTFS_DEFAULT_SALT "\x00\x11\x22\x33\x44\x55\x66\x77" #define ECRYPTFS_DEFAULT_SALT_HEX "0011223344556677" #define ECRYPTFS_DEFAULT_SALT_FNEK_HEX "9988776655443322" #define ECRYPTFS_SIG_SIZE 8 #define ECRYPTFS_SIG_SIZE_HEX (ECRYPTFS_SIG_SIZE*2) #define ECRYPTFS_PASSWORD_SIG_SIZE ECRYPTFS_SIG_SIZE_HEX #define ECRYPTFS_MAX_KEY_BYTES 64 #define ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES 512 #define ECRYPTFS_DEFAULT_IV_BYTES 16 static struct fmt_tests ecryptfs_tests[] = { /* hash ==> first 16 bytes of ~/.ecryptfs/wrapped-passphrase */ {"$ecryptfs$0$92dc3db8feaf1676", "openwall"}, {"$ecryptfs$0$ccb515ee115be591", "failpassword"}, {"$ecryptfs$0$8acb10b9e061fcc7", "verylongbutstillfailpassword"}, /* fake hash to test custom salt handling */ {"$ecryptfs$0$1$0000000000000000$884ed410cd143bca", "fake"}, {"$ecryptfs$0$1$544c39674737716a$a8307a01b2d1b008", "fake"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; static struct custom_salt { int iterations; // really really unused (even in the original code) int salt_length; 
char unsigned salt[ECRYPTFS_SALT_SIZE + 1];
} *cur_salt;

/* Allocate per-candidate buffers; under OpenMP, scale the key batch by the
 * thread count times OMP_SCALE so every thread has work. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_align(sizeof(*saved_key), self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_align(sizeof(*crypt_out), self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/* Accept "$ecryptfs$0$<16 hex>" or "$ecryptfs$0$1$<16 hex salt>$<16 hex>". */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LENGTH) != 0)
		return 0;
	p = ciphertext + FORMAT_TAG_LENGTH;
	if (*p != '0' || *(p + 1) != '$')
		return 0;
	p += 2;
	if (*p == '1' && *(p + 1) == '$') {
		// handle salted variety
		p += 2;
		/* abs() -- presumably guards a negative hexlenl() return for
		 * invalid hex; confirm against misc.c. */
		if ( abs(hexlenl(p)) != HEX_BINARY_SIZE || p[HEX_BINARY_SIZE] != '$')
			return 0;
		p += (HEX_BINARY_SIZE+1);
	}
	return hexlenl(p) == HEX_BINARY_SIZE && !p[HEX_BINARY_SIZE];
}

/* Parse the optional "1$<hex>" salt field; fall back to the fixed eCryptfs
 * default salt. Returns a pointer to a static buffer (standard JtR salt
 * convention: the caller copies SALT_SIZE bytes out). */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	int i;
	char *p, *q;

	memset(&cs, 0, SALT_SIZE);
	p = ciphertext + FORMAT_TAG_LENGTH;
	p = p + 2; // skip over "0$"
	/* support for custom salt */
	if (*p == '1' && *(p + 1) == '$') {
		p = p + 2;
		q = strchr(p, '$');  // valid() guarantees this '$' exists
		cs.salt_length = (q - p) / 2;
		for (i = 0; i < cs.salt_length; i++)
			cs.salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])];
	} else {
		/* default-salt case: salt_length stays 0 from the memset; the
		 * hashing code always consumes ECRYPTFS_SALT_SIZE bytes. */
		memcpy(cs.salt, ECRYPTFS_DEFAULT_SALT, ECRYPTFS_SALT_SIZE);
	}
	return (void *)&cs;
}

/* Decode the 8 verification bytes following the LAST '$' (strrchr skips the
 * optional salt field for salted hashes). */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[REAL_BINARY_SIZE];
		ARCH_WORD_32 dummy;  // force word alignment of c[]
	} buf;
	unsigned char *out = buf.c;
	int i;
	char *p = strrchr(ciphertext, '$') + 1;

	for (i = 0; i < REAL_BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

/* Standard JtR hash-table accessors over the first output word. */
static int get_hash_0(int index)
{
	return crypt_out[index][0] & PH_MASK_0;
}

static int get_hash_1(int index)
{
	return crypt_out[index][0] & PH_MASK_1;
}

static int get_hash_2(int index)
{
	return crypt_out[index][0] & PH_MASK_2;
}

static int get_hash_3(int index)
{
	return crypt_out[index][0] & PH_MASK_3;
}

static int get_hash_4(int index)
{
	return crypt_out[index][0] & PH_MASK_4;
}

static int get_hash_5(int index)
{
	return crypt_out[index][0] & PH_MASK_5;
}

static int get_hash_6(int index)
{
	return crypt_out[index][0] & PH_MASK_6;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* Core hashing loop: h = SHA512(salt || password), then iterate
 * h = SHA512(h) -- the eCryptfs "generate_passphrase_sig" scheme. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) {
		int j;
		SHA512_CTX ctx;
#ifdef SIMD_COEF_64
		unsigned char tmpBuf[64];
		unsigned int i;
		/* Over-allocated so the interleaved key buffer can be aligned
		 * to a cache line below. */
		unsigned char _IBuf[128*MAX_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *keys;
		ARCH_WORD_64 *keys64;

		keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE);
		keys64 = (ARCH_WORD_64*)keys;
		memset(keys, 0, 128*MAX_KEYS_PER_CRYPT);
		/* Seed each SIMD lane with SHA512(salt || password), laid out in
		 * the interleaved format and pre-padded as a 64-byte message. */
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			SHA512_Init(&ctx);
			SHA512_Update(&ctx, cur_salt->salt, ECRYPTFS_SALT_SIZE);
			SHA512_Update(&ctx, saved_key[index+i], strlen(saved_key[index+i]));
			SHA512_Final((unsigned char *)tmpBuf, &ctx);
			for (j = 0; j < 64; ++j)
				keys[GETPOS_512(j, i)] = tmpBuf[j];
			/* j == 64 here: the 0x80 padding byte goes right after
			 * the digest. */
			keys[GETPOS_512(j, i)] = 0x80; // 64 bytes of crypt data (0x200 bits).
			/* Big-endian message bit length 0x0200 (= 512 bits) at the
			 * tail of the 128-byte SHA-512 block. */
			keys[GETPOS_512(126, i)] = 0x02;
		}
		/* Both code paths below perform the initial salt+password hash
		 * plus 65536 re-hash rounds in total. */
		for (j = 1; j < ECRYPTFS_DEFAULT_NUM_HASH_ITERATIONS; j++)
			SIMDSHA512body(keys, keys64, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
		// Last one with FLAT_OUT
		SIMDSHA512body(keys, (ARCH_WORD_64*)crypt_out[index], NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT|SSEi_FLAT_OUT);
#else
		SHA512_Init(&ctx);
		SHA512_Update(&ctx, cur_salt->salt, ECRYPTFS_SALT_SIZE);
		SHA512_Update(&ctx, saved_key[index], strlen(saved_key[index]));
		SHA512_Final((unsigned char *)crypt_out[index], &ctx);
		/* now crypt_out[index] ("h") becomes our input; re-hash it
		 * ECRYPTFS_DEFAULT_NUM_HASH_ITERATIONS more times */
		for (j = 1; j <= ECRYPTFS_DEFAULT_NUM_HASH_ITERATIONS; j++) {
			SHA512_CTX ctx;  // shadows the outer ctx intentionally
			SHA512_Init(&ctx);
			SHA512_Update(&ctx, (unsigned char*)crypt_out[index], BINARY_SIZE);
			SHA512_Final((unsigned char *)crypt_out[index], &ctx);
		}
#endif
	}
	return count;
}

/* Only the first REAL_BINARY_SIZE (8) bytes are compared -- the stored
 * wrapped-passphrase signature is truncated to 8 bytes. */
static int cmp_all(void *binary, int count)
{
	int index = 0;

	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], REAL_BINARY_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], REAL_BINARY_SIZE);
}

/* Nothing further to verify beyond the 8-byte prefix match. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Copy the candidate, truncated to PLAINTEXT_LENGTH and NUL-terminated. */
static void ecryptfs_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Format descriptor wiring the callbacks above into the JtR core. */
struct fmt_main fmt_ecryptfs1 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		REAL_BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		ecryptfs_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		ecryptfs_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
doacross-1.c
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */

/* DejaGnu negative test for OpenMP doacross loops (#pragma omp ordered with
   depend(sink)/depend(source)).  Every dg-error comment below is a functional
   test expectation anchored to its own line -- do not move or reword them.  */

void
foo (void)
{
  int i, j, k;
  /* Valid: ordered(1) with one sink index.  */
  #pragma omp for ordered (1)
  for (i = 0; i < 64; i++)
    {
      #pragma omp ordered depend (sink: i - 1)
      #pragma omp ordered depend (source)
    }
  /* Valid: explicit collapse(1) matches ordered(1).  */
  #pragma omp for ordered (1) collapse (1)
  for (i = 0; i < 64; i++)
    {
      #pragma omp ordered depend (sink: i - 1)
      #pragma omp ordered depend (source)
    }
  /* Invalid: ordered parameter smaller than collapse parameter.  */
  #pragma omp for collapse (2) ordered (1)	/* { dg-error "clause parameter is less than" } */
  for (i = 0; i < 64; i++)
    for (j = 0; j < 64; j++)
      {
	#pragma omp ordered depend (sink: i - 1)	/* { dg-error "does not match number" } */
	#pragma omp ordered depend (source)
      }
  /* Invalid: ordered(2) smaller than collapse(3); sink lists 2 of 3 indices.  */
  #pragma omp for ordered (2) collapse (3)	/* { dg-error "clause parameter is less than" } */
  for (i = 0; i < 64; i++)
    for (j = 0; j < 64; j++)
      for (k = 0; k < 64; k++)
	{
	  #pragma omp ordered depend (sink: i - 1, j - 2)	/* { dg-error "does not match number" } */
	  #pragma omp ordered depend (source)
	}
  /* Invalid: depend forms of ordered outside any ordered(n) loop.  */
  #pragma omp ordered depend (sink: j)	/* { dg-error "clause must be closely nested inside an .ordered. loop" } */
  #pragma omp ordered depend (source)	/* { dg-error "clause must be closely nested inside an .ordered. loop" } */
  /* Multiple sink clauses are fine; multiple source clauses are not.  */
  #pragma omp for ordered (1)
  for (i = 0; i < 64; i++)
    {
      #pragma omp ordered depend (sink: i - 1) depend (sink: i - 2)
      #pragma omp ordered depend (source) depend (source)	/* { dg-error "more than one .depend. clause with .source. modifier on an .ordered. construct" } */
    }
  /* Mixing source and sink on one ordered construct is rejected.  */
  #pragma omp for ordered (1)
  for (i = 0; i < 64; i++)
    {
      #pragma omp ordered depend (sink: i - 1) depend (source) depend (sink: i - 2)	/* { dg-error ".depend. clause with .source. modifier specified together with .depend. clauses with .sink. modifier on the same construct" } */
    }
}
BRKGA.h
/** * BRKGA.h * * This template class encapsulates a Biased Random-key Genetic Algorithm for minimization problems * with K independent Populations stored in two vectors of Population, current and previous. * It supports multi-threading via OpenMP, and implements the following key methods: * * - BRKGA() constructor: initializes the populations with parameters described below. * - evolve() operator: evolve each Population following the BRKGA methodology. This method * supports OpenMP to evolve up to K independent Populations in parallel. * Please note that double Decoder::decode(...) MUST be thread-safe. * * Required parameters: * - n: number of genes in each chromosome * - p: number of elements in each population * - pe: pct of elite items into each population * - pm: pct of mutants introduced at each generation into the population * - rhoe: probability that an offspring inherits the allele of its elite parent * * Optional parameters: * - K: number of independent Populations (set to 1 if not supplied) * - MAX_THREADS: number of threads to perform parallel decoding (set to 1 if not supplied) * WARNING: Decoder::decode() MUST be thread-safe if MAX_THREADS > 1! * * The following objects are required upon declaration: * RNG: random number generator that implements the methods below. * - RNG(unsigned long seed) to initialize a new RNG with 'seed' * - double rand() to return a double precision random deviate in range [0,1) * - unsigned long randInt() to return a >=32-bit unsigned random deviate in range [0,2^32-1) * - unsigned long randInt(N) to return a unsigned random deviate in range [0, N] with N < 2^32 * * Decoder: problem-specific decoder that implements any of the decode methods outlined below. When * compiling and linking BRKGA with -fopenmp (i.e., with multithreading support via * OpenMP), the method must be thread-safe. 
* - double decode(const vector< double >& chromosome) const, if you don't want to change * chromosomes inside the framework, or * - double decode(vector< double >& chromosome) const, if you'd like to update a chromosome. * WARNING: even though both methods use const correctness to enforce that they are thread safe * the use of mutable within the Decoder class could void such a feature! In other * words, DO NOT use mutable within the decoder. * * Created on : Jun 22, 2010 by rtoso * Last update: Sep 15, 2011 by rtoso * Authors : Rodrigo Franco Toso <rtoso@cs.rutgers.edu> * Mauricio G.C. Resende <mgcr@research.att.com> * * The MIT License (MIT) * * Copyright (c) 2018 * Rodrigo Franco Toso (rfrancotoso@gmail.com) and * Mauricio G.C. Resende * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished to do * so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*
*/

#ifndef BRKGA_H
#define BRKGA_H

#include <omp.h>
#include <algorithm>
#include <exception>
#include <stdexcept>
#include "Population.h"

// NOTE(review): the dynamic exception specifications (throw(std::range_error))
// below are deprecated since C++11 and removed in C++17; they will need to be
// dropped (or replaced by noexcept(false)) to build with modern standards.
template< class Decoder, class RNG >
class BRKGA {
public:
	/*
	 * Default constructor
	 * Required hyperparameters:
	 * - n: number of genes in each chromosome
	 * - p: number of elements in each population
	 * - pe: pct of elite items into each population
	 * - pm: pct of mutants introduced at each generation into the population
	 * - rhoe: probability that an offspring inherits the allele of its elite parent
	 *
	 * Optional parameters:
	 * - K: number of independent Populations
	 * - MAX_THREADS: number of threads to perform parallel decoding
	 *   WARNING: Decoder::decode() MUST be thread-safe; safe if implemented as
	 *   + double Decoder::decode(std::vector< double >& chromosome) const
	 */
	BRKGA(unsigned n, unsigned p, double pe, double pm, double rhoe,
	      const Decoder& refDecoder, RNG& refRNG,
	      unsigned K = 1, unsigned MAX_THREADS = 1) throw(std::range_error);

	/**
	 * Destructor
	 */
	~BRKGA();

	/**
	 * Resets all populations with brand new keys
	 */
	void reset();

	/**
	 * Evolve the current populations following the guidelines of BRKGAs
	 * @param generations number of generations to run (defaults to 1)
	 * (Elite exchange between populations is a separate step; see exchangeElite().)
	 */
	void evolve(unsigned generations = 1);

	/**
	 * Exchange elite-solutions between the populations
	 * @param M number of elite chromosomes to select from each population
	 */
	void exchangeElite(unsigned M) throw(std::range_error);

	/**
	 * Returns the current population
	 */
	const Population& getPopulation(unsigned k = 0) const;

	/**
	 * Returns the chromosome with best fitness so far among all populations
	 */
	const std::vector< double >& getBestChromosome() const;

	/**
	 * Returns the best fitness found so far among all populations
	 */
	double getBestFitness() const;

	// Return copies to the internal parameters:
	unsigned getN() const;
	unsigned getP() const;
	unsigned getPe() const;
	unsigned getPm() const;
	unsigned getPo() const;
	double getRhoe() const;
	unsigned getK() const;
	unsigned getMAX_THREADS() const;

private:
	// I don't see any reason to pimpl the internal methods and data, so here they are:
	// Hyperparameters:
	const unsigned n;	// number of genes in the chromosome
	const unsigned p;	// number of elements in the population
	const unsigned pe;	// number of elite items in the population
	const unsigned pm;	// number of mutants introduced at each generation into the population
	const double rhoe;	// probability that an offspring inherits the allele of its elite parent

	// Templates:
	RNG& refRNG;				// reference to the random number generator
	const Decoder& refDecoder;	// reference to the problem-dependent Decoder

	// Parallel populations parameters:
	const unsigned K;			// number of independent parallel populations
	const unsigned MAX_THREADS;	// number of threads for parallel decoding

	// Data:
	std::vector< Population* > previous;	// previous populations
	std::vector< Population* > current;		// current populations

	// Local operations:
	void initialize(const unsigned i);	// initialize current population 'i' with random keys
	void evolution(Population& curr, Population& next);
	bool isRepeated(const std::vector< double >& chrA, const std::vector< double >& chrB) const;
};

// Constructor: validates the hyperparameters, then allocates, randomizes,
// decodes and sorts each of the K populations (previous[i] starts as a copy
// of current[i]).  _pe and _pm are fractions of p and are converted to
// absolute set sizes here.
template< class Decoder, class RNG >
BRKGA< Decoder, RNG >::BRKGA(unsigned _n, unsigned _p, double _pe, double _pm, double _rhoe,
		const Decoder& decoder, RNG& rng, unsigned _K, unsigned MAX) throw(std::range_error) :
		n(_n), p(_p), pe(unsigned(_pe * p)), pm(unsigned(_pm * p)), rhoe(_rhoe),
		refRNG(rng), refDecoder(decoder), K(_K), MAX_THREADS(MAX),
		previous(K, 0), current(K, 0) {
	// Error check:
	using std::range_error;
	if(n == 0) { throw range_error("Chromosome size equals zero."); }
	if(p == 0) { throw range_error("Population size equals zero."); }
	if(pe == 0) { throw range_error("Elite-set size equals zero.");
}
	if(pe > p) { throw range_error("Elite-set size greater than population size (pe > p)."); }
	if(pm > p) { throw range_error("Mutant-set size (pm) greater than population size (p)."); }
	if(pe + pm > p) { throw range_error("elite + mutant sets greater than population size (p)."); }
	if(K == 0) { throw range_error("Number of parallel populations cannot be zero."); }

	// Initialize and decode each chromosome of the current population, then copy to previous:
	for(unsigned i = 0; i < K; ++i) {
		// Allocate:
		current[i] = new Population(n, p);
		// Initialize:
		initialize(i);
		// Then just copy to previous:
		previous[i] = new Population(*current[i]);
	}
}

template< class Decoder, class RNG >
BRKGA< Decoder, RNG >::~BRKGA() {
	for(unsigned i = 0; i < K; ++i) { delete current[i]; delete previous[i]; }
}

// Returns population k (bounds-checked only when RANGECHECK is defined).
template< class Decoder, class RNG >
const Population& BRKGA< Decoder, RNG >::getPopulation(unsigned k) const {
#ifdef RANGECHECK
	if(k >= K) { throw std::range_error("Invalid population identifier."); }
#endif
	return (*current[k]);
}

// Best fitness across all K populations; each population's fitness[0] is its
// best entry (populations are kept sorted by sortFitness()).
template< class Decoder, class RNG >
double BRKGA< Decoder, RNG >::getBestFitness() const {
	double best = current[0]->fitness[0].first;
	for(unsigned i = 1; i < K; ++i) {
		if(current[i]->fitness[0].first < best) { best = current[i]->fitness[0].first; }
	}
	return best;
}

// Chromosome achieving the best fitness across all K populations.
template< class Decoder, class RNG >
const std::vector< double >& BRKGA< Decoder, RNG >::getBestChromosome() const {
	unsigned bestK = 0;
	for(unsigned i = 1; i < K; ++i) {
		if( current[i]->getBestFitness() < current[bestK]->getBestFitness() ) { bestK = i; }
	}
	return current[bestK]->getChromosome(0);	// The top one :-)
}

// Re-randomize, decode and sort every population in place.
template< class Decoder, class RNG >
void BRKGA< Decoder, RNG >::reset() {
	for(unsigned i = 0; i < K; ++i) { initialize(i); }
}

// Run 'generations' evolution rounds on every population, ping-ponging
// between the 'current' and 'previous' buffers via std::swap.
template< class Decoder, class RNG >
void BRKGA< Decoder, RNG >::evolve(unsigned generations) {
#ifdef RANGECHECK
	if(generations == 0) { throw std::range_error("Cannot evolve for 0 generations."); }
#endif

	for(unsigned i = 0; i < generations; ++i) {
for(unsigned j = 0; j < K; ++j) {
			evolution(*current[j], *previous[j]);	// First evolve the population (curr, next)
			std::swap(current[j], previous[j]);	// Update (prev = curr; curr = prev == next)
		}
	}
}

// Copy the M best chromosomes of every population into the tail of every
// other population, then re-sort.
// NOTE(review): 'dest' is decremented M*(K-1) times per destination
// population with no guard; if M*(K-1) > p it wraps below zero (unsigned)
// -- RANGECHECK only validates M against p, not M*(K-1).  Confirm callers
// keep M*(K-1) <= p - pe.
template< class Decoder, class RNG >
void BRKGA< Decoder, RNG >::exchangeElite(unsigned M) throw(std::range_error) {
#ifdef RANGECHECK
	if(M == 0 || M >= p) { throw std::range_error("M cannot be zero or >= p."); }
#endif

	for(unsigned i = 0; i < K; ++i) {
		// Population i will receive some elite members from each Population j below:
		unsigned dest = p - 1;	// Last chromosome of i (will be updated below)
		for(unsigned j = 0; j < K; ++j) {
			if(j == i) { continue; }

			// Copy the M best of Population j into Population i:
			for(unsigned m = 0; m < M; ++m) {
				// Copy the m-th best of Population j into the 'dest'-th position of Population i:
				const std::vector< double >& bestOfJ = current[j]->getChromosome(m);
				std::copy(bestOfJ.begin(), bestOfJ.end(), current[i]->getChromosome(dest).begin());
				current[i]->fitness[dest].first = current[j]->fitness[m].first;
				--dest;
			}
		}
	}

	for(int j = 0; j < int(K); ++j) { current[j]->sortFitness(); }
}

// Fill population i with uniform random keys in [0,1), decode every
// chromosome (in parallel when OpenMP is enabled), and sort by fitness.
template< class Decoder, class RNG >
inline void BRKGA< Decoder, RNG >::initialize(const unsigned i) {
	for(unsigned j = 0; j < p; ++j) {
		for(unsigned k = 0; k < n; ++k) { (*current[i])(j, k) = refRNG.rand(); }
	}

	// Decode:
	#ifdef _OPENMP
		#pragma omp parallel for num_threads(MAX_THREADS)
	#endif
	for(int j = 0; j < int(p); ++j) {
		current[i]->setFitness(j, refDecoder.decode((*current[i])(j)) );
	}

	// Sort:
	current[i]->sortFitness();
}

// One BRKGA generation: copy the pe elites, mate p-pe-pm offspring (each
// allele from the elite parent with probability rhoe), append pm random
// mutants, then decode the non-elite part in parallel and re-sort 'next'.
template< class Decoder, class RNG >
inline void BRKGA< Decoder, RNG >::evolution(Population& curr, Population& next) {
	// We now will set every chromosome of 'current', iterating with 'i':
	unsigned i = 0;	// Iterate chromosome by chromosome
	unsigned j = 0;	// Iterate allele by allele

	// 2. The 'pe' best chromosomes are maintained, so we just copy these into 'current':
	while(i < pe) {
		for(j = 0 ; j < n; ++j) { next(i,j) = curr(curr.fitness[i].second, j); }
		// Elites keep their decoded fitness; the parallel decode below
		// only re-decodes indices >= pe.
		next.fitness[i].first = curr.fitness[i].first;
		next.fitness[i].second = i;
		++i;
	}

	// 3. We'll mate 'p - pe - pm' pairs; initially, i = pe, so we need to iterate until i < p - pm:
	while(i < p - pm) {
		// Select an elite parent:
		const unsigned eliteParent = (refRNG.randInt(pe - 1));

		// Select a non-elite parent:
		const unsigned noneliteParent = pe + (refRNG.randInt(p - pe - 1));

		// Mate: biased coin per allele, favoring the elite parent with prob. rhoe.
		for(j = 0; j < n; ++j) {
			const unsigned& sourceParent = ((refRNG.rand() < rhoe) ? eliteParent : noneliteParent);
			next(i, j) = curr(curr.fitness[sourceParent].second, j);
		}

		++i;
	}

	// We'll introduce 'pm' mutants:
	while(i < p) {
		for(j = 0; j < n; ++j) { next(i, j) = refRNG.rand(); }
		++i;
	}

	// Time to compute fitness, in parallel:
	#ifdef _OPENMP
		#pragma omp parallel for num_threads(MAX_THREADS)
	#endif
	for(int i = int(pe); i < int(p); ++i) {
		next.setFitness( i, refDecoder.decode(next.population[i]) );
	}

	// Now we must sort 'current' by fitness, since things might have changed:
	next.sortFitness();
}

// Trivial accessors for the (immutable) hyperparameters:
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getN() const { return n; }

template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getP() const { return p; }

template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getPe() const { return pe; }

template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getPm() const { return pm; }

// Number of offspring produced by mating each generation.
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getPo() const { return p - pe - pm; }

template< class Decoder, class RNG >
double BRKGA<Decoder, RNG>::getRhoe() const { return rhoe; }

template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getK() const { return K; }

template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getMAX_THREADS() const { return MAX_THREADS; }

#endif
FiniteElementMesh.h
#pragma once #include <omp.h> #include "AnimatedMesh.h" #include "CGVectorWrapper.h" #include "CGSystemWrapper.h" #include "ConjugateGradient.h" #include <Eigen/Dense> //#define USE_LINEAR_ELASTICITY #define USE_ST_VENANT_KIRCHHOFF template<class T> struct FiniteElementMesh : public AnimatedMesh<T, 3> { using Base = AnimatedMesh<T, 3>; using Base::m_meshElements; using Base::m_particleX; using Vector2 = typename Base::Vector2; using Matrix22 = Eigen::Matrix< T , 2 , 2>; int m_nFrames; int m_subSteps; T m_frameDt; T m_stepDt; T m_stepEndTime; const T m_density; const T m_mu; const T m_lambda; const T m_rayleighCoefficient; std::vector<T> m_particleMass; std::vector<Matrix22> m_DmInverse; std::vector<T> m_restVolume; FiniteElementMesh(const T density, const T mu, const T lambda, const T rayleighCoefficient) :m_density(density), m_mu(mu), m_lambda(lambda), m_rayleighCoefficient(rayleighCoefficient) {} void initializeUndeformedConfiguration() { // Initialize rest shape and particle mass (based on constant density) m_particleMass.resize(m_particleX.size(), T()); // Initialize all particle masses to zero for(const auto& element: m_meshElements) { Matrix22 Dm; for(int j = 0; j < 2; j++) Dm.col(j) = m_particleX[element[j+1]]-m_particleX[element[0]]; T restVolume = .5 * Dm.determinant(); if(restVolume < 0) throw std::logic_error("Inverted element"); m_DmInverse.emplace_back(Dm.inverse()); m_restVolume.push_back(restVolume); T elementMass = m_density * restVolume; for(const int v: element) m_particleMass[v] += (1./3.) 
* elementMass;
        }
    }

    // Accumulate per-element elastic forces into f (f is NOT cleared here).
    // Element scatter is serialized with 'omp critical' because elements
    // sharing a vertex write the same entries of f.
    void addElasticForce(std::vector<Vector2>& f) const
    {
#pragma omp parallel for
        for(int e = 0; e < m_meshElements.size(); e++)
        {
            const auto& element = m_meshElements[e];

            // Compute deformation gradient
            Matrix22 Ds;
            for(int j = 0; j < 2; j++)
                Ds.col(j) = m_particleX[element[j+1]]-m_particleX[element[0]];
            Matrix22 F = Ds * m_DmInverse[e];

#ifdef USE_LINEAR_ELASTICITY
            Matrix22 strain = .5 * (F + F.transpose()) - Matrix22::Identity();
            Matrix22 P = 2. * m_mu * strain + m_lambda * strain.trace() * Matrix22::Identity();
#endif

#ifdef USE_ST_VENANT_KIRCHHOFF
            // St. Venant-Kirchhoff: P = F (2 mu E + lambda tr(E) I)
            Matrix22 E = .5 * ( F.transpose() * F - Matrix22::Identity());
            Matrix22 P = F * (2. * m_mu * E + m_lambda * E.trace() * Matrix22::Identity());
#endif

            Matrix22 H = -m_restVolume[e] * P * m_DmInverse[e].transpose();
#pragma omp critical
            {
                for(int j = 0; j < 2; j++){
                    f[element[j+1]] += H.col(j);
                    f[element[0]] -= H.col(j);
                }
            }
        }
    }

    // Accumulate df += (stiffness differential applied to dx) per element.
    void addProductWithStiffnessMatrix(std::vector<Vector2>& dx, std::vector<Vector2>& df, const T scale) const
    {
#pragma omp parallel for
        for(int e = 0; e < m_meshElements.size(); e++)
        {
            const auto& element = m_meshElements[e];

            // Compute deformation gradient
            Matrix22 Ds;
            for(int j = 0; j < 2; j++)
                Ds.col(j) = m_particleX[element[j+1]]-m_particleX[element[0]];
            Matrix22 F = Ds * m_DmInverse[e];

            // Compute differential(s)
            Matrix22 dDs;
            for(int j = 0; j < 2; j++)
                dDs.col(j) = dx[element[j+1]]-dx[element[0]];
            Matrix22 dF = dDs * m_DmInverse[e];

#ifdef USE_LINEAR_ELASTICITY
            Matrix22 dstrain = .5 * (dF + dF.transpose());
            Matrix22 dP = scale * (2. * m_mu * dstrain + m_lambda * dstrain.trace() * Matrix22::Identity());
#endif

#ifdef USE_ST_VENANT_KIRCHHOFF
            // NOTE(review): unlike the linear-elasticity branch, this dP does
            // NOT multiply by 'scale' -- verify callers pass scale == 1 for
            // this model, or that the omission is intentional.
            Matrix22 dE = .5 * ( dF.transpose() * F + F.transpose() * dF);
            Matrix22 dP = dF * (2. * m_mu * E + m_lambda * E.trace() * Matrix22::Identity()) + F * (2.
* m_mu * dE + m_lambda * dE.trace() * Matrix22::Identity());
#endif

            Matrix22 dH = m_restVolume[e] * dP * m_DmInverse[e].transpose();

            // NOTE(review): this scatter into df runs under 'omp parallel for'
            // WITHOUT the 'omp critical' guard that the analogous scatter in
            // addElasticForce uses; elements sharing a vertex write the same
            // df entries concurrently -- looks like a data race; confirm.
            for(int j = 0; j < 2; j++){
                df[element[j+1]] += dH.col(j);
                df[element[0]] -= dH.col(j);
            }
        }
    }

    // One quasi-static substep: apply boundary conditions, solve
    // K dx = f_elastic with CG (tolerance 1e-4, max 50 iterations), and move
    // the particles by dx.
    void simulateSubstep()
    {
        using FEMType = FiniteElementMesh<T>;

        const int nParticles = m_particleX.size();

        // Construct initial guess for next-timestep
        //   Positions -> Same as last timestep
        // Overwrite boundary conditions with desired values
        setBoundaryConditions();

        // Solve for everything else using Conjugate Gradients
        std::vector<Vector2> dx(nParticles, Vector2::Zero());
        std::vector<Vector2> rhs(nParticles, Vector2::Zero());
        std::vector<Vector2> q(nParticles, Vector2::Zero());
        std::vector<Vector2> s(nParticles, Vector2::Zero());
        std::vector<Vector2> r(nParticles, Vector2::Zero());
        CGVectorWrapper<Vector2> dxWrapper(dx);
        CGVectorWrapper<Vector2> rhsWrapper(rhs);
        CGVectorWrapper<Vector2> qWrapper(q);
        CGVectorWrapper<Vector2> sWrapper(s);
        CGVectorWrapper<Vector2> rWrapper(r);
        CGSystemWrapper<Vector2, FEMType> systemWrapper(*this);

        addElasticForce(rhs);
        clearConstrainedParticles(rhs);

        ConjugateGradient<T>::Solve(systemWrapper, dxWrapper, rhsWrapper, qWrapper, sWrapper, rWrapper, 1e-4, 50);

        // Apply corrections to positions and velocities
        // (only positions are updated in this quasi-static formulation)
        for(int p = 0; p < nParticles; p++)
            m_particleX[p] += dx[p];
    }

    // Advance one frame as m_subSteps equal substeps; m_stepEndTime tracks the
    // absolute time at the end of each substep for time-dependent BCs.
    void simulateFrame(const int frame)
    {
        m_stepDt = m_frameDt / (T) m_subSteps;

        for(int step = 1; step <= m_subSteps; step++){
            m_stepEndTime = m_frameDt * (T) (frame-1) + m_stepDt * (T) step;
            simulateSubstep();
        }
    }

    // Hooks for subclasses: zero rows of constrained particles / impose BCs.
    virtual void clearConstrainedParticles(std::vector<Vector2>& x) {}
    virtual void setBoundaryConditions() {}
};
cfae4d_so8_gcc.c
#define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include <stdio.h>
#include "omp.h"
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))

/* Generic data carrier for a multi-dimensional array: `data` is the raw
 * buffer, `size` the per-dimension extents used below to form VLA casts.
 * The remaining fields are not read in this file. */
struct dataobj
{
  void *restrict data;
  int *size;
  int *npsize;
  int *dsize;
  int *hsize;
  int *hofs;
  int *oofs;
};

/* Per-section wall-clock accumulators filled in by Kernel(). */
struct profiler
{
  double section0;
  double section1;
};

void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int time, const int tw);

/* Time-tiled driver for the wave-propagation stencil in bf0().
 * Outer loops tile (t_blk, xb, yb); the inner `time` loop steps by `sf`
 * and derives the three rotating time-buffer indices t0/t1/t2 (mod 3).
 * The x/y loop bounds are extended by sf*(time_M - time_m) because the
 * spatial indices inside bf0 are time-skewed (`x - time`, `y - time`).
 * Returns 0; elapsed times are accumulated into `timers`. */
int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine, struct profiler *timers)
{
  int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
  int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
  float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
  int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
  int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
  int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
  /* NOTE(review): `u` is cast here but never referenced in Kernel() (only in
   * bf0(), which builds its own cast); this declaration is dead code. */
  float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;

  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);

  /* block_sizes packs: [0]=x tile, [1]=y tile, [2]=x block, [3]=y block. */
  int xb_size = block_sizes[0];
  int y0_blk0_size = block_sizes[3];
  int x0_blk0_size = block_sizes[2];
  int yb_size = block_sizes[1];

  printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);

  /* sf: time-skewing factor; presumably tied to the stencil's per-step
   * dependence distance — TODO confirm against the generator. */
  int sf = 4;
  int t_blk_size = 2 * sf * (time_M - time_m);

  struct timeval start_section0, end_section0;
  gettimeofday(&start_section0, NULL);
  for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
  {
    for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1)
    {
      for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1)
      {
        for (int time = t_blk, t0 = (time) % (3), t1 = (time + 2) % (3), t2 = (time + 1) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1))) % (3), t1 = (((time / sf) % (time_M - time_m + 1)) + 2 ) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3))
        {
          /* tw: logical timestep index (de-skewed), used to select the
           * source amplitudes for this step in save_src_u. */
          int tw = ((time / sf) % (time_M - time_m + 1));
          /* Trailing x/y bounds are trimmed to a multiple of the block size;
           * remainder iterations were handled by bf0 variants that are
           * commented out in the original generated code. */
          bf0(damp_vec, dt, u_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, source_id_vec, source_mask_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, time, tw);
        }
      }
    }
    /* End section0 */
    /* NOTE(review): start_section0 is taken once before the t_blk loop but
     * the accumulation happens inside it, so with more than one t_blk
     * iteration the same interval is counted repeatedly. Benign in practice
     * because the step sf*t_blk_size normally yields a single iteration —
     * confirm before relying on section0 timings. */
    gettimeofday(&end_section0, NULL);
    timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000;
  }

  /* Section 1 is an empty placeholder in this generated variant; only the
   * timing scaffolding remains. */
  for (int time = time_m, t2 = (time + 1) % (3); time <= time_M; time += 1, t2 = (time + 1) % (3))
  {
    struct timeval start_section1, end_section1;
    gettimeofday(&start_section1, NULL);
    /* Begin section1 */
    /* End section1 */
    gettimeofday(&end_section1, NULL);
    timers->section1 += (double)(end_section1.tv_sec - start_section1.tv_sec) + (double)(end_section1.tv_usec - start_section1.tv_usec) / 1000000;
  }
  return 0;
}

/* One tile/block of the skewed stencil update plus source injection.
 * Works on the rotating 3-slot time buffer of `u` (slots t0/t1/t2); all
 * spatial indices are de-skewed with `- time` and offset by the halo
 * width (8 for u/vp, 1 for damp). Parallelized over (x0_blk0, y0_blk0)
 * blocks; the z loops are SIMD-vectorized. */
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int time, const int tw)
{
  float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
  float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
  float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
  int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
  float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
  int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
  int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
  int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;

  /* Degenerate block sizes would make the loop increments zero. */
  if (x0_blk0_size == 0 || y0_blk0_size == 0)
  {
    return;
  }
#pragma omp parallel num_threads(nthreads)
  {
#pragma omp for collapse(2) schedule(dynamic, 1)
    for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
    {
      for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
      {
        for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++)
        {
          for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++)
          {
            /* 8th-order (radius-4) cross stencil in x/y/z combined with the
             * two previous time levels and a damping term; the result is an
             * implicit-in-damping update written into time slot t2. */
#pragma omp simd aligned(damp, u, vp : 32)
            for (int z = z_m; z <= z_M; z += 1)
            {
              float r8 = 1.0/dt;
              float r7 = 1.0/(dt*dt);
              float r6 = 1.0/(vp[x - time + 8][y - time + 8][z + 8]*vp[x - time + 8][y - time + 8][z + 8]);
              u[t2][x - time + 8][y - time + 8][z + 8] = (r6*(-r7*(-2.0F*u[t0][x - time + 8][y - time + 8][z + 8] + u[t1][x - time + 8][y - time + 8][z + 8])) + r8*(damp[x - time + 1][y - time + 1][z + 1]*u[t0][x - time + 8][y - time + 8][z + 8]) - 7.93650813e-6F*(u[t0][x - time + 4][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 4][z + 8] + u[t0][x - time + 8][y - time + 8][z + 4] + u[t0][x - time + 8][y - time + 8][z + 12] + u[t0][x - time + 8][y - time + 12][z + 8] + u[t0][x - time + 12][y - time + 8][z + 8]) + 1.12874782e-4F*(u[t0][x - time + 5][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 5][z + 8] + u[t0][x - time + 8][y - time + 8][z + 5] + u[t0][x - time + 8][y - time + 8][z + 11] + u[t0][x - time + 8][y - time + 11][z + 8] + u[t0][x - time + 11][y - time + 8][z + 8]) - 8.8888891e-4F*(u[t0][x - time + 6][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 6][z + 8] + u[t0][x - time + 8][y - time + 8][z + 6] + u[t0][x - time + 8][y - time + 8][z + 10] + u[t0][x - time + 8][y - time + 10][z + 8] + u[t0][x - time + 10][y - time + 8][z + 8]) + 7.11111128e-3F*(u[t0][x - time + 7][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 7][z + 8] + u[t0][x - time + 8][y - time + 8][z + 7] + u[t0][x - time + 8][y - time + 8][z + 9] + u[t0][x - time + 8][y - time + 9][z + 8] + u[t0][x - time + 9][y - time + 8][z + 8]) - 3.79629639e-2F*u[t0][x - time + 8][y - time + 8][z + 8])/(r6*r7 + r8*damp[x - time + 1][y - time + 1][z + 1]);
            }
            /* Sparse source injection: for each nonzero mask entry of this
             * (x, y) column, add the masked source amplitude for logical
             * step tw at the recorded z index. */
#pragma omp simd aligned(damp, u, vp : 32)
            for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1)
            {
              int zind = sp_source_mask[x - time][y - time][sp_zi];
              float r0 = save_src_u[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
              u[t2][x - time + 8][y - time + 8][zind + 8] += r0;
            }
          }
        }
      }
    }
  }
}
IJMatrix_parcsr.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * IJMatrix_ParCSR interface
 *
 *****************************************************************************/

#include "_hypre_IJ_mv.h"
#include "_hypre_parcsr_mv.h"
#include "../HYPRE.h"

/******************************************************************************
 *
 * hypre_IJMatrixCreateParCSR
 *
 * Creates the underlying ParCSR object for an IJ matrix from the IJ row and
 * column partitionings, rebased so that the first global row/column index
 * becomes 0. When the row and column partitionings are the same array the
 * two starts arrays are shared.
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixCreateParCSR(hypre_IJMatrix *matrix)
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   hypre_ParCSRMatrix *par_matrix;
   HYPRE_BigInt *row_starts;
   HYPRE_BigInt *col_starts;
   HYPRE_Int num_procs;
   HYPRE_Int i;

   hypre_MPI_Comm_size(comm,&num_procs);

   /* [first local row, one past last local row], shifted by the global
      first row so the ParCSR object is 0-based */
   row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);

   if (hypre_IJMatrixGlobalFirstRow(matrix))
   {
      for (i = 0; i < 2; i++)
      {
         row_starts[i] = row_partitioning[i] - hypre_IJMatrixGlobalFirstRow(matrix);
      }
   }
   else
   {
      for (i = 0; i < 2; i++)
      {
         row_starts[i] = row_partitioning[i];
      }
   }

   /* Same treatment for columns; pointer comparison deliberately detects a
      shared row/col partitioning array and reuses row_starts in that case */
   if (row_partitioning != col_partitioning)
   {
      col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
      if (hypre_IJMatrixGlobalFirstCol(matrix))
      {
         for (i = 0; i < 2; i++)
         {
            col_starts[i] = col_partitioning[i]-hypre_IJMatrixGlobalFirstCol(matrix);
         }
      }
      else
      {
         for (i = 0; i < 2; i++)
         {
            col_starts[i] = col_partitioning[i];
         }
      }
   }
   else
   {
      col_starts = row_starts;
   }

   /* 0 off-diagonal columns / nonzeros for now; sizes are set later.
      NOTE(review): ownership of row_starts/col_starts presumably passes to
      the ParCSR matrix here — confirm against hypre_ParCSRMatrixCreate. */
   par_matrix = hypre_ParCSRMatrixCreate(comm, hypre_IJMatrixGlobalNumRows(matrix),
                                         hypre_IJMatrixGlobalNumCols(matrix),
                                         row_starts, col_starts, 0, 0, 0);

   hypre_IJMatrixObject(matrix) = par_matrix;

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixSetRowSizesParCSR
 *
 * Records the expected number of entries per local row in the auxiliary
 * matrix (creating the aux matrix and/or its row_space array on first use).
 * `sizes` must hold one count per local row.
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixSetRowSizesParCSR(hypre_IJMatrix *matrix,
                                const HYPRE_Int *sizes)
{
   HYPRE_Int local_num_rows, local_num_cols, i, *row_space = NULL;
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);

   /* local extents from the [start, end) partitionings */
   local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
   local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]);

   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);

   /* reuse an existing row_space array if the aux matrix already has one */
   if (aux_matrix)
   {
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
   }
   if (!row_space)
   {
      row_space = hypre_CTAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
   }
   for (i = 0; i < local_num_rows; i++)
   {
      row_space[i] = sizes[i];
   }
   if (!aux_matrix)
   {
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, row_space);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   hypre_AuxParCSRMatrixRowSpace(aux_matrix) = row_space;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* on GPU builds, also record the total user-declared on-proc entries */
   hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) = 0;
   for (i = 0; i < local_num_rows; i++)
   {
      hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) += sizes[i];
   }
#endif

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixSetDiagOffdSizesParCSR
 * sets diag_i inside the diag part of the ParCSRMatrix
 * and offd_i inside the offd part,
 * requires exact row sizes for diag and offd
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixSetDiagOffdSizesParCSR(hypre_IJMatrix *matrix,
                                     const HYPRE_Int *diag_sizes,
                                     const HYPRE_Int *offd_sizes)
{
   HYPRE_Int local_num_rows, local_num_cols;
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);

   /* local extents from the [start, end) partitionings */
   local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
   local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]);

   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *)hypre_IJMatrixTranslator(matrix);

   if (!aux_matrix)
   {
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }

   /* allocate the per-row diag/offd size arrays on first call, then copy
      the caller's exact counts into them */
   if ( hypre_AuxParCSRMatrixDiagSizes(aux_matrix) == NULL)
   {
      hypre_AuxParCSRMatrixDiagSizes(aux_matrix) = hypre_TAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
   }
   if ( hypre_AuxParCSRMatrixOffdSizes(aux_matrix) == NULL)
   {
      hypre_AuxParCSRMatrixOffdSizes(aux_matrix) = hypre_TAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
   }
   hypre_TMemcpy(hypre_AuxParCSRMatrixDiagSizes(aux_matrix), diag_sizes, HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
   hypre_TMemcpy(hypre_AuxParCSRMatrixOffdSizes(aux_matrix), offd_sizes, HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);

   /* exact sizes are known, so the aux (row-wise) storage is not needed */
   hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixSetMaxOnProcElmtsParCSR
 *
 * GPU builds only: records an upper bound on the number of on-processor
 * elements in the auxiliary matrix; a no-op on host-only builds.
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixSetMaxOnProcElmtsParCSR(hypre_IJMatrix *matrix,
                                      HYPRE_Int max_on_proc_elmts)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_Int local_num_rows, local_num_cols, my_id;
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   MPI_Comm comm = hypre_IJMatrixComm(matrix);

   hypre_MPI_Comm_rank(comm,&my_id);
   aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
   if
   (!aux_matrix)
   {
      local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
      local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]);
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) = max_on_proc_elmts;
#endif

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixSetMaxOffProcElmtsParCSR
 *
 * Records an upper bound on the number of off-processor elements in the
 * auxiliary matrix, creating the aux matrix if it does not exist yet.
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixSetMaxOffProcElmtsParCSR(hypre_IJMatrix *matrix,
                                       HYPRE_Int max_off_proc_elmts)
{
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_Int local_num_rows, local_num_cols, my_id;
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   MPI_Comm comm = hypre_IJMatrixComm(matrix);

   hypre_MPI_Comm_rank(comm,&my_id);
   aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
   if (!aux_matrix)
   {
      local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
      local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]);
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* GPU builds track the user-supplied bound separately */
   hypre_AuxParCSRMatrixUsrOffProcElmts(aux_matrix) = max_off_proc_elmts;
#endif

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixInitializeParCSR
 *
 * initializes AuxParCSRMatrix and ParCSRMatrix as necessary
 * (thin wrapper that forwards to the _v2 variant with the handle's
 * current memory location)
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixInitializeParCSR(hypre_IJMatrix *matrix)
{
   return hypre_IJMatrixInitializeParCSR_v2(matrix,
                                            hypre_HandleMemoryLocation(hypre_handle()));
}

/* Initializes the ParCSR object and its auxiliary matrix for a given memory
 * location; when per-row diag/offd sizes were supplied beforehand, the CSR
 * row pointers are prefix-summed from them here. */
HYPRE_Int
hypre_IJMatrixInitializeParCSR_v2(hypre_IJMatrix *matrix, HYPRE_MemoryLocation memory_location)
{
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);

   /* aux data lives on host iff the execution policy for the requested
      location resolves to host */
   HYPRE_MemoryLocation memory_location_aux =
      hypre_GetExecPolicy1(memory_location) == HYPRE_EXEC_HOST ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE;

   if (hypre_IJMatrixAssembleFlag(matrix) == 0)  /* not assembled yet */
   {
      if (!par_matrix)
      {
         hypre_IJMatrixCreateParCSR(matrix);
         par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
      }

      HYPRE_Int local_num_rows = hypre_ParCSRMatrixNumRows(par_matrix);
      HYPRE_Int i;
      hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
      hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);

      if (!aux_matrix)
      {
         hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows,
                                     hypre_ParCSRMatrixNumCols(par_matrix), NULL);
         hypre_IJMatrixTranslator(matrix) = aux_matrix;
      }

      hypre_ParCSRMatrixInitialize_v2(par_matrix, memory_location);
      hypre_AuxParCSRMatrixInitialize_v2(aux_matrix, memory_location_aux);

      if (memory_location_aux == HYPRE_MEMORY_HOST)
      {
         /* exact sizes known: turn them into CSR row pointers by prefix sum,
            then (re)initialize so j/data arrays match the nonzero counts */
         if (hypre_AuxParCSRMatrixDiagSizes(aux_matrix))
         {
            for (i = 0; i < local_num_rows; i++)
            {
               hypre_CSRMatrixI(diag)[i+1] = hypre_CSRMatrixI(diag)[i] + hypre_AuxParCSRMatrixDiagSizes(aux_matrix)[i];
            }
            hypre_CSRMatrixNumNonzeros(diag) = hypre_CSRMatrixI(diag)[local_num_rows];
            hypre_CSRMatrixInitialize(diag);
         }
         if (hypre_AuxParCSRMatrixOffdSizes(aux_matrix))
         {
            for (i = 0; i < local_num_rows; i++)
            {
               hypre_CSRMatrixI(offd)[i+1] = hypre_CSRMatrixI(offd)[i] + hypre_AuxParCSRMatrixOffdSizes(aux_matrix)[i];
            }
            hypre_CSRMatrixNumNonzeros(offd) = hypre_CSRMatrixI(offd)[local_num_rows];
            hypre_CSRMatrixInitialize(offd);
         }
      }

      if (!hypre_AuxParCSRMatrixNeedAux(aux_matrix))
      {
         /* direct-insertion mode: seed the per-row write cursors from the
            freshly built CSR row pointers */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < local_num_rows; i++)
         {
            hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[i] = hypre_CSRMatrixI(diag)[i];
            hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[i] = hypre_CSRMatrixI(offd)[i];
         }
      }
   }
   else if ( memory_location_aux == HYPRE_MEMORY_HOST )
   {
      /* AB 4/06 - the assemble routine destroys the aux matrix - so we need
         to recreate if initialize is called again */
      if (!aux_matrix)
      {
         hypre_AuxParCSRMatrixCreate(&aux_matrix, hypre_ParCSRMatrixNumRows(par_matrix),
                                     hypre_ParCSRMatrixNumCols(par_matrix), NULL);
         hypre_AuxParCSRMatrixMemoryLocation(aux_matrix) = HYPRE_MEMORY_HOST;
         hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
         hypre_IJMatrixTranslator(matrix) = aux_matrix;
      }
   }

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixGetRowCountsParCSR
 *
 * gets the number of columns for rows specified by the user
 * (per requested global row: diag + offd entry count, or 0 with an
 * optional warning if the row is not owned by this process)
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixGetRowCountsParCSR( hypre_IJMatrix *matrix,
                                  HYPRE_Int       nrows,
                                  HYPRE_BigInt   *rows,
                                  HYPRE_Int      *ncols)
{
   HYPRE_BigInt row_index;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
   HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int i, my_id, pstart, index;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);

   hypre_MPI_Comm_rank(comm,&my_id);

   pstart = 0;

   /* NOTE(review): `index` is written inside the parallel loop but is not in
      the private(...) clause, so as written it is shared across threads —
      this looks like a data race; confirm against upstream hypre, which
      keeps this variable thread-private. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i, row_index) HYPRE_SMP_SCHEDULE
#endif
   for (i=0; i < nrows; i++)
   {
      row_index = rows[i];
      if (row_index >= row_partitioning[pstart] &&
          row_index < row_partitioning[pstart+1])
      {
         /* compute local row number */
         index = (HYPRE_Int)(row_index - row_partitioning[pstart]);
         ncols[i] =
diag_i[index+1]-diag_i[index]+offd_i[index+1]-offd_i[index]; } else { ncols[i] = 0; if (print_level) { hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row_index, my_id); } } } return hypre_error_flag; } /****************************************************************************** * * hypre_IJMatrixGetValuesParCSR * * gets values of an IJMatrix * *****************************************************************************/ HYPRE_Int hypre_IJMatrixGetValuesParCSR( hypre_IJMatrix *matrix, HYPRE_Int nrows, HYPRE_Int *ncols, HYPRE_BigInt *rows, HYPRE_BigInt *cols, HYPRE_Complex *values) { MPI_Comm comm = hypre_IJMatrixComm(matrix); hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix); HYPRE_Int assemble_flag = hypre_IJMatrixAssembleFlag(matrix); hypre_CSRMatrix *diag; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *diag_data; hypre_CSRMatrix *offd; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_Complex *offd_data; HYPRE_BigInt *col_map_offd; HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(par_matrix); HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix); HYPRE_Int i, j, n, ii, indx, pstart; HYPRE_Int num_procs, my_id; HYPRE_BigInt col_0, col_n, row, col_indx, first; HYPRE_Int row_local, row_size; HYPRE_Int warning = 0; HYPRE_Int *counter; HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix); hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); if (assemble_flag == 0) { hypre_error_in_arg(1); if (print_level) { hypre_printf("Error! Matrix not assembled yet! 
HYPRE_IJMatrixGetValues\n"); } } col_0 = col_starts[0]; col_n = col_starts[1]-1; first = hypre_IJMatrixGlobalFirstCol(matrix); pstart = 0; diag = hypre_ParCSRMatrixDiag(par_matrix); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); diag_data = hypre_CSRMatrixData(diag); offd = hypre_ParCSRMatrixOffd(par_matrix); offd_i = hypre_CSRMatrixI(offd); if (num_procs > 1) { offd_j = hypre_CSRMatrixJ(offd); offd_data = hypre_CSRMatrixData(offd); col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix); } if (nrows < 0) { nrows = -nrows; counter = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); counter[0] = 0; for (i=0; i < nrows; i++) { counter[i+1] = counter[i]+ncols[i]; } indx = 0; for (i=0; i < nrows; i++) { row = rows[i]; if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1]) { row_local = (HYPRE_Int)(row - row_partitioning[pstart]); row_size = diag_i[row_local+1] - diag_i[row_local] + offd_i[row_local+1] - offd_i[row_local]; if (counter[i]+row_size > counter[nrows]) { hypre_error_in_arg(1); if (print_level) { hypre_printf ("Error! Not enough memory! HYPRE_IJMatrixGetValues\n"); } } if (ncols[i] < row_size) { warning = 1; } for (j = diag_i[row_local]; j < diag_i[row_local+1]; j++) { cols[indx] = (HYPRE_BigInt)diag_j[j] + col_0; values[indx++] = diag_data[j]; } for (j = offd_i[row_local]; j < offd_i[row_local+1]; j++) { cols[indx] = col_map_offd[offd_j[j]]; values[indx++] = offd_data[j]; } counter[i+1] = indx; } else { if (print_level) { hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id); } } } if (warning) { for (i=0; i < nrows; i++) { ncols[i] = counter[i+1] - counter[i]; } if (print_level) { hypre_printf ("Warning! 
ncols has been changed!\n"); } } hypre_TFree(counter, HYPRE_MEMORY_HOST); } else { indx = 0; for (ii=0; ii < nrows; ii++) { row = rows[ii]; n = ncols[ii]; if (n == 0) /* empty row */ { continue; } if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1]) { row_local = (HYPRE_Int)(row - row_partitioning[pstart]); /* compute local row number */ for (i=0; i < n; i++) { col_indx = cols[indx] - first; values[indx] = 0.0; if (col_indx < col_0 || col_indx > col_n) /* search in offd */ { for (j=offd_i[row_local]; j < offd_i[row_local+1]; j++) { if (col_map_offd[offd_j[j]] == col_indx) { values[indx] = offd_data[j]; break; } } } else /* search in diag */ { col_indx = col_indx - col_0; for (j=diag_i[row_local]; j < diag_i[row_local+1]; j++) { if (diag_j[j] == (HYPRE_Int)col_indx) { values[indx] = diag_data[j]; break; } } } indx++; } } else { if (print_level) { hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id); } } } } return hypre_error_flag; } /****************************************************************************** * * hypre_IJMatrixSetValuesParCSR * * sets values in an IJMatrix before assembly, * *****************************************************************************/ HYPRE_Int hypre_IJMatrixSetValuesParCSR( hypre_IJMatrix *matrix, HYPRE_Int nrows, HYPRE_Int *ncols, const HYPRE_BigInt *rows, const HYPRE_Int *row_indexes, const HYPRE_BigInt *cols, const HYPRE_Complex *values ) { hypre_ParCSRMatrix *par_matrix; hypre_CSRMatrix *diag, *offd; hypre_AuxParCSRMatrix *aux_matrix; HYPRE_BigInt *row_partitioning; HYPRE_BigInt *col_partitioning; MPI_Comm comm = hypre_IJMatrixComm(matrix); HYPRE_Int num_procs, my_id; HYPRE_Int row_local; //HYPRE_Int row_len; HYPRE_BigInt col_0, col_n, row; HYPRE_Int i, ii, j, n, not_found; //HYPRE_Int col_indx, cnt1; HYPRE_BigInt **aux_j; HYPRE_BigInt *local_j; HYPRE_BigInt *tmp_j; HYPRE_Complex **aux_data; HYPRE_Complex *local_data; HYPRE_Complex *tmp_data; HYPRE_Int diag_space, offd_space; HYPRE_Int 
*row_length, *row_space; HYPRE_Int need_aux; HYPRE_Int tmp_indx, indx; HYPRE_Int space, size, old_size; HYPRE_Int cnt, cnt_diag, cnt_offd; HYPRE_Int pos_diag, pos_offd; HYPRE_Int len_diag, len_offd; HYPRE_Int offd_indx, diag_indx; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *diag_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_Complex *offd_data; HYPRE_BigInt first; HYPRE_Int pstart; /*HYPRE_Int current_num_elmts;*/ /*HYPRE_Int max_off_proc_elmts;*/ //HYPRE_Int off_proc_i_indx; //HYPRE_BigInt *off_proc_i; //HYPRE_BigInt *off_proc_j; HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix); /*HYPRE_Complex *off_proc_data;*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix ); row_partitioning = hypre_IJMatrixRowPartitioning(matrix); col_partitioning = hypre_IJMatrixColPartitioning(matrix); col_0 = col_partitioning[0]; col_n = col_partitioning[1]-1; first = hypre_IJMatrixGlobalFirstCol(matrix); pstart = 0; if (nrows < 0) { hypre_error_in_arg(2); if (print_level) { hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n"); } } if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/ { HYPRE_BigInt *col_map_offd; HYPRE_Int num_cols_offd; HYPRE_Int j_offd; for (ii=0; ii < nrows; ii++) { row = rows[ii]; n = ncols ? 
ncols[ii] : 1; if (n == 0) /* empty row */ { continue; } indx = row_indexes[ii]; /* processor owns the row */ if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1]) { row_local = (HYPRE_Int)(row - row_partitioning[pstart]); /* compute local row number */ diag = hypre_ParCSRMatrixDiag(par_matrix); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); diag_data = hypre_CSRMatrixData(diag); offd = hypre_ParCSRMatrixOffd(par_matrix); offd_i = hypre_CSRMatrixI(offd); num_cols_offd = hypre_CSRMatrixNumCols(offd); if (num_cols_offd) { col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix); offd_j = hypre_CSRMatrixJ(offd); offd_data = hypre_CSRMatrixData(offd); } size = diag_i[row_local+1] - diag_i[row_local] + offd_i[row_local+1] - offd_i[row_local]; if (n > size) /* Should we change this and allow this? This could be same column index, i.e. only last value is set, previous ones overwritten. */ { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" row %b too long! 
\n", row); } return hypre_error_flag; } pos_diag = diag_i[row_local]; pos_offd = offd_i[row_local]; len_diag = diag_i[row_local+1]; len_offd = offd_i[row_local+1]; not_found = 1; for (i=0; i < n; i++) { if (cols[indx] < col_0 || cols[indx] > col_n) /* insert into offd */ { j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first, num_cols_offd); if (j_offd == -1) { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } return hypre_error_flag; } for (j=pos_offd; j < len_offd; j++) { if (offd_j[j] == j_offd) { offd_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } return hypre_error_flag; } not_found = 1; } /* diagonal element */ else if (cols[indx] == row) { if (diag_j[pos_diag] != row_local) { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } /* return -1;*/ return hypre_error_flag; } diag_data[pos_diag] = values[indx]; } else /* insert into diag */ { for (j=pos_diag; j < len_diag; j++) { if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0)) { diag_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } /* return -1; */ return hypre_error_flag; } } indx++; } } } } else { aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix); row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix); row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix); need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix); for (ii=0; ii < nrows; ii++) { row = rows[ii]; n = ncols ? 
ncols[ii] : 1; if (n == 0) /* empty row */ { continue; } indx = row_indexes[ii]; /* processor owns the row */ if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1]) { row_local = (HYPRE_Int)(row - row_partitioning[pstart]); /* compute local row number */ if (need_aux) { aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix); aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix); local_j = aux_j[row_local]; local_data = aux_data[row_local]; space = row_space[row_local]; old_size = row_length[row_local]; size = space - old_size; if (size < n) { size = n - size; tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST); tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST); } else { tmp_j = NULL; } tmp_indx = 0; not_found = 1; size = old_size; for (i=0; i < n; i++) { for (j=0; j < old_size; j++) { if (local_j[j] == cols[indx]) { local_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { if (size < space) { local_j[size] = cols[indx]; local_data[size++] = values[indx]; } else { tmp_j[tmp_indx] = cols[indx]; tmp_data[tmp_indx++] = values[indx]; } } not_found = 1; indx++; } row_length[row_local] = size+tmp_indx; if (tmp_indx) { aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt, size+tmp_indx, HYPRE_MEMORY_HOST); aux_data[row_local] = hypre_TReAlloc(aux_data[row_local], HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST); row_space[row_local] = size+tmp_indx; local_j = aux_j[row_local]; local_data = aux_data[row_local]; } cnt = size; for (i=0; i < tmp_indx; i++) { local_j[cnt] = tmp_j[i]; local_data[cnt++] = tmp_data[i]; } if (tmp_j) { hypre_TFree(tmp_j, HYPRE_MEMORY_HOST); hypre_TFree(tmp_data, HYPRE_MEMORY_HOST); } } else /* insert immediately into data in ParCSRMatrix structure */ { HYPRE_BigInt *big_offd_j; HYPRE_Int col_j; offd_indx =hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local]; diag_indx =hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local]; diag = hypre_ParCSRMatrixDiag(par_matrix); diag_i = 
hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); diag_data = hypre_CSRMatrixData(diag); offd = hypre_ParCSRMatrixOffd(par_matrix); offd_i = hypre_CSRMatrixI(offd); if (num_procs > 1) { big_offd_j = hypre_CSRMatrixBigJ(offd); offd_data = hypre_CSRMatrixData(offd); if (!big_offd_j) { big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)], hypre_CSRMatrixMemoryLocation(offd)); hypre_CSRMatrixBigJ(offd) = big_offd_j; } } cnt_diag = diag_indx; cnt_offd = offd_indx; diag_space = diag_i[row_local+1]; offd_space = offd_i[row_local+1]; not_found = 1; for (i=0; i < n; i++) { if (cols[indx] < col_0 || cols[indx] > col_n) /* insert into offd */ { for (j=offd_i[row_local]; j < offd_indx; j++) { if (big_offd_j[j] == cols[indx]) { offd_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { if (cnt_offd < offd_space) { big_offd_j[cnt_offd] = cols[indx]; offd_data[cnt_offd++] = values[indx]; } else { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf("Error in row %b ! Too many elements!\n", row); } /* return 1; */ return hypre_error_flag; } } not_found = 1; } else /* insert into diag */ { col_j = (HYPRE_Int)(cols[indx]-col_0); for (j=diag_i[row_local]; j < diag_indx; j++) { if (diag_j[j] == col_j) { diag_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { if (cnt_diag < diag_space) { diag_j[cnt_diag] = col_j; diag_data[cnt_diag++] = values[indx]; } else { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf("Error in row %b ! Too many elements !\n", row); } /* return 1; */ return hypre_error_flag; } } not_found = 1; } indx++; } hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag; hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd; } } } } return hypre_error_flag; } /****************************************************************************** * * hypre_IJMatrixSetConstantValuesParCSR * * sets all values in an already assembled IJMatrix to a constant value. 
* *****************************************************************************/

/* Host-side kernel: overwrite every stored nonzero of the diag and offd CSR
 * parts with `value`.  Only touches existing entries; the sparsity pattern is
 * unchanged.  Loops are embarrassingly parallel, hence the OpenMP pragmas. */
void
hypre_IJMatrixSetConstantValuesParCSRHost( hypre_IJMatrix *matrix,
                                           HYPRE_Complex   value )
{
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   hypre_CSRMatrix    *diag       = hypre_ParCSRMatrixDiag(par_matrix);
   hypre_CSRMatrix    *offd       = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Complex      *diag_data  = hypre_CSRMatrixData(diag);
   HYPRE_Complex      *offd_data  = hypre_CSRMatrixData(offd);
   HYPRE_Int           nnz_diag   = hypre_CSRMatrixNumNonzeros(diag);
   HYPRE_Int           nnz_offd   = hypre_CSRMatrixNumNonzeros(offd);
   HYPRE_Int           ii;

   /* fill local (diagonal-block) coefficients */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii) HYPRE_SMP_SCHEDULE
#endif
   for (ii = 0; ii < nnz_diag; ii++)
   {
      diag_data[ii] = value;
   }

   /* fill off-processor (off-diagonal-block) coefficients */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii) HYPRE_SMP_SCHEDULE
#endif
   for (ii = 0; ii < nnz_offd; ii++)
   {
      offd_data[ii] = value;
   }
}

/* Dispatcher: set every stored coefficient of an ASSEMBLED IJMatrix to
 * `value`.  Picks the device path when the matrix memory lives on the GPU
 * (CUDA/HIP builds), otherwise runs the host kernel above.  Calling this on
 * an unassembled matrix is an error. */
HYPRE_Int
hypre_IJMatrixSetConstantValuesParCSR( hypre_IJMatrix *matrix,
                                       HYPRE_Complex value )
{
   if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
   {
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      if (hypre_GetExecPolicy1(hypre_IJMatrixMemoryLocation(matrix)) == HYPRE_EXEC_DEVICE)
      {
         hypre_IJMatrixSetConstantValuesParCSRDevice(matrix, value);
      }
      else
#endif
      {
         hypre_IJMatrixSetConstantValuesParCSRHost(matrix, value);
      }
   }
   else
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Matrix not assembled! Required to set constant values!");
   }

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixAddToValuesParCSR
 *
 * adds row values to an IJMatrix
 *
 * Behavior depends on the assembly state:
 *  - assembled matrix: entries must already exist in the sparsity pattern;
 *    owned-row values are added in place, non-owned rows are queued in the
 *    auxiliary off-processor buffers for later communication at assemble time.
 *  - unassembled matrix: owned rows are accumulated either in the auxiliary
 *    row storage (need_aux) or directly in the (preallocated) ParCSR arrays;
 *    non-owned rows are again queued in the off-processor buffers.
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixAddToValuesParCSR( hypre_IJMatrix       *matrix,
                                 HYPRE_Int             nrows,
                                 HYPRE_Int            *ncols,
                                 const HYPRE_BigInt   *rows,
                                 const HYPRE_Int      *row_indexes,
                                 const HYPRE_BigInt   *cols,
                                 const HYPRE_Complex  *values )
{
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_Int row_local;
   HYPRE_BigInt row;
   HYPRE_BigInt col_0, col_n;          /* first/last global column owned by this proc */
   HYPRE_Int i, ii, j, n, not_found;
   HYPRE_BigInt **aux_j;
   HYPRE_BigInt *local_j;
   HYPRE_BigInt *tmp_j;
   HYPRE_Complex **aux_data;
   HYPRE_Complex *local_data;
   HYPRE_Complex *tmp_data;
   HYPRE_Int diag_space, offd_space;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int tmp_indx, indx;
   HYPRE_Int space, size, old_size;
   HYPRE_Int cnt, cnt_diag, cnt_offd;
   HYPRE_Int pos_diag, pos_offd;
   HYPRE_Int len_diag, len_offd;
   HYPRE_Int offd_indx, diag_indx;
   HYPRE_BigInt first;
   HYPRE_Int pstart;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_Int current_num_elmts;        /* elements queued for other procs */
   HYPRE_Int max_off_proc_elmts;       /* capacity of the off-proc buffers */
   HYPRE_Int off_proc_i_indx;          /* write index into off_proc_i (2 slots/row) */
   HYPRE_BigInt *off_proc_i;
   HYPRE_BigInt *off_proc_j;
   HYPRE_Complex *off_proc_data;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);

   col_0 = col_partitioning[0];
   col_n = col_partitioning[1]-1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   pstart = 0;

   if (hypre_IJMatrixAssembleFlag(matrix))
   {
      /* Assembled matrix: sparsity pattern is fixed; entries must exist. */
      HYPRE_Int num_cols_offd;
      HYPRE_BigInt *col_map_offd;
      HYPRE_Int j_offd;

      /* AB - 4/06 - need to get this object*/
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);

      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols ? ncols[ii] : 1;
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];

         /* processor owns the row */
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            /* compute local row number */
            diag = hypre_ParCSRMatrixDiag(par_matrix);
            diag_i = hypre_CSRMatrixI(diag);
            diag_j = hypre_CSRMatrixJ(diag);
            diag_data = hypre_CSRMatrixData(diag);
            offd = hypre_ParCSRMatrixOffd(par_matrix);
            offd_i = hypre_CSRMatrixI(offd);
            num_cols_offd = hypre_CSRMatrixNumCols(offd);
            if (num_cols_offd)
            {
               col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
               offd_j = hypre_CSRMatrixJ(offd);
               offd_data = hypre_CSRMatrixData(offd);
            }

            /* total number of stored entries in this local row */
            size = diag_i[row_local+1] - diag_i[row_local]
                   + offd_i[row_local+1] - offd_i[row_local];

            if (n > size)
            /* Should we change this and allow this?
               This could be same column index, i.e. only last value is set,
               previous ones overwritten. */
            {
               hypre_error(HYPRE_ERROR_GENERIC);
               if (print_level)
               {
                  hypre_printf (" row %b too long! \n", row);
               }
               return hypre_error_flag;
            }

            pos_diag = diag_i[row_local];
            pos_offd = offd_i[row_local];
            len_diag = diag_i[row_local+1];
            len_offd = offd_i[row_local+1];
            not_found = 1;

            for (i=0; i < n; i++)
            {
               if (cols[indx] < col_0 || cols[indx] > col_n)
               /* insert into offd */
               {
                  /* map global column to local offd column index */
                  j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
                                                 num_cols_offd);
                  if (j_offd == -1)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                     /* return -1; */
                  }
                  for (j=pos_offd; j < len_offd; j++)
                  {
                     if (offd_j[j] == j_offd)
                     {
                        offd_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  not_found = 1;
               }
               /* diagonal element */
               else if (cols[indx] == row)
               {
                  /* diag part stores the diagonal entry first in each row */
                  if (diag_j[pos_diag] != row_local)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  diag_data[pos_diag] += values[indx];
               }
               else  /* insert into diag */
               {
                  for (j=pos_diag; j < len_diag; j++)
                  {
                     if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
                     {
                        diag_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
               }
               indx++;
            }
         }
         /* not my row: queue in off-processor buffers for assemble-time exchange */
         else
         {
            if (!aux_matrix)
            {
               size = (HYPRE_Int)(row_partitioning[pstart+1]-row_partitioning[pstart]);
               hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
               hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
               hypre_IJMatrixTranslator(matrix) = aux_matrix;
            }
            current_num_elmts
               = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
            max_off_proc_elmts
               = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);

            if (!max_off_proc_elmts)
            {
               /* first off-proc element: allocate the buffers
                * (off_proc_i holds 2 slots per row: row number, count) */
               max_off_proc_elmts = hypre_max(n,1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) =
                  max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               /* grow the buffers; keep aux_matrix fields in sync */
               max_off_proc_elmts += 3*n;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt,
                                           2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt,
                                           max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
                                              max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix)
                  = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }
            /* AB - 4/6 - the row should be negative to indicate an add */
            /* UMY - 12/28/09 - now positive since we eliminated the feature
               of setting on other processors */
            /* off_proc_i[off_proc_i_indx++] = row; */
            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i=0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix)
               = current_num_elmts;
         }
      }
   }
   /* not assembled */
   else
   {
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols ? ncols[ii] : 1;
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];

         /* processor owns the row */
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            /* compute local row number */
            if (need_aux)
            {
               /* accumulate in growable per-row aux arrays */
               aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
               aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
               local_j = aux_j[row_local];
               local_data = aux_data[row_local];
               space = row_space[row_local];
               old_size = row_length[row_local];
               size = space - old_size;        /* free slots remaining */
               if (size < n)
               {
                  /* overflow entries go to a temp buffer; merged after realloc */
                  size = n - size;
                  tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
                  tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
               }
               else
               {
                  tmp_j = NULL;
               }
               tmp_indx = 0;
               not_found = 1;
               size = old_size;
               for (i=0; i < n; i++)
               {
                  /* add to an existing column entry if present */
                  for (j=0; j < old_size; j++)
                  {
                     if (local_j[j] == cols[indx])
                     {
                        local_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     if (size < space)
                     {
                        local_j[size] = cols[indx];
                        local_data[size++] = values[indx];
                     }
                     else
                     {
                        tmp_j[tmp_indx] = cols[indx];
                        tmp_data[tmp_indx++] = values[indx];
                     }
                  }
                  not_found = 1;
                  indx++;
               }

               row_length[row_local] = size+tmp_indx;

               if (tmp_indx)
               {
                  /* grow the row and append the overflow entries */
                  aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
                                                    size+tmp_indx, HYPRE_MEMORY_HOST);
                  aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
                                                       HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST);
                  row_space[row_local] = size+tmp_indx;
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
               }

               cnt = size;
               for (i=0; i < tmp_indx; i++)
               {
                  local_j[cnt] = tmp_j[i];
                  local_data[cnt++] = tmp_data[i];
               }

               if (tmp_j)
               {
                  hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
                  hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
               }
            }
            else /* insert immediately into data in ParCSRMatrix structure */
            {
               HYPRE_BigInt *big_offd_j;
               offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
               diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
               diag = hypre_ParCSRMatrixDiag(par_matrix);
               diag_i = hypre_CSRMatrixI(diag);
               diag_j = hypre_CSRMatrixJ(diag);
               diag_data = hypre_CSRMatrixData(diag);
               offd = hypre_ParCSRMatrixOffd(par_matrix);
               offd_i = hypre_CSRMatrixI(offd);
               if (num_procs > 1)
               {
                  big_offd_j = hypre_CSRMatrixBigJ(offd);
                  offd_data = hypre_CSRMatrixData(offd);
                  if (!big_offd_j)
                  {
                     /* offd columns are stored as global (big) indices until
                        assembly; allocate the big-index array on demand */
                     big_offd_j = hypre_CTAlloc(HYPRE_BigInt,
                                                offd_i[hypre_CSRMatrixNumRows(offd)],
                                                hypre_CSRMatrixMemoryLocation(offd));
                     hypre_CSRMatrixBigJ(offd) = big_offd_j;
                  }
               }
               cnt_diag = diag_indx;
               cnt_offd = offd_indx;
               diag_space = diag_i[row_local+1];
               offd_space = offd_i[row_local+1];
               not_found = 1;
               for (i=0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                  /* insert into offd */
                  {
                     for (j=offd_i[row_local]; j < offd_indx; j++)
                     {
                        if (big_offd_j[j] == cols[indx])
                        {
                           offd_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_offd < offd_space)
                        {
                           big_offd_j[cnt_offd] = cols[indx];
                           offd_data[cnt_offd++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements!\n",
                                           row);
                           }
                           /* return 1;*/
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  else  /* insert into diag */
                  {
                     HYPRE_Int col_j = (HYPRE_Int)( cols[indx] - col_0);
                     for (j=diag_i[row_local]; j < diag_indx; j++)
                     {
                        if (diag_j[j] == col_j)
                        {
                           diag_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_diag < diag_space)
                        {
                           diag_j[cnt_diag] = col_j;
                           diag_data[cnt_diag++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements !\n",
                                           row);
                           }
                           /* return 1; */
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  indx++;
               }

               hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
               hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
            }
         }
         /* not my row: queue in off-processor buffers (same scheme as above) */
         else
         {
            current_num_elmts
               = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
            max_off_proc_elmts
               = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);

            if (!max_off_proc_elmts)
            {
               max_off_proc_elmts = hypre_max(n,1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) =
                  max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               max_off_proc_elmts += 3*n;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt,
                                           2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt,
                                           max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
                                              max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix)
                  = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }
            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i=0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix)
               = current_num_elmts;
         }
      }
   }

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixDestroyParCSR
 *
 * frees an IJMatrix
 *
 * Destroys both the underlying ParCSR object and the auxiliary translator
 * (each destroy handles a NULL argument internally — see the hypre
 * destroy routines).  Always returns hypre_error_flag.
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixDestroyParCSR(hypre_IJMatrix *matrix)
{
   hypre_ParCSRMatrixDestroy((hypre_ParCSRMatrix *)hypre_IJMatrixObject(matrix));
   hypre_AuxParCSRMatrixDestroy((hypre_AuxParCSRMatrix*)hypre_IJMatrixTranslator(matrix));

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixAssembleOffProcValsParCSR
 *
 * This is for handling set and get values calls to off-proc. entries -
 * it is called from matrix assemble. There is an alternate version for
 * when the assumed partition is being used.
* *****************************************************************************/ HYPRE_Int hypre_IJMatrixAssembleOffProcValsParCSR( hypre_IJMatrix *matrix, HYPRE_Int off_proc_i_indx, HYPRE_Int max_off_proc_elmts, HYPRE_Int current_num_elmts, HYPRE_MemoryLocation memory_location, HYPRE_BigInt *off_proc_i, HYPRE_BigInt *off_proc_j, HYPRE_Complex *off_proc_data ) { MPI_Comm comm = hypre_IJMatrixComm(matrix); HYPRE_Int i, j, k, in_i; HYPRE_Int myid; HYPRE_Int proc_id, last_proc, prev_id, tmp_id; HYPRE_Int max_response_size; HYPRE_BigInt global_num_cols; HYPRE_BigInt global_first_col; HYPRE_BigInt global_first_row; HYPRE_Int ex_num_contacts = 0, num_rows = 0; HYPRE_BigInt range_start, range_end; HYPRE_Int num_elements; HYPRE_Int storage; HYPRE_Int indx; HYPRE_BigInt row; HYPRE_Int num_ranges, row_index = 0; HYPRE_Int num_recvs; HYPRE_BigInt upper_bound; HYPRE_Int counter; HYPRE_Int num_real_procs; HYPRE_Int /*current_proc,*/ original_proc_indx; HYPRE_BigInt *row_list=NULL; HYPRE_Int *row_list_num_elements=NULL; HYPRE_Int *a_proc_id=NULL, *orig_order=NULL; HYPRE_Int *real_proc_id = NULL, *us_real_proc_id = NULL; HYPRE_Int *ex_contact_procs = NULL, *ex_contact_vec_starts = NULL; HYPRE_BigInt *ex_contact_buf = NULL; HYPRE_Int *recv_starts=NULL; HYPRE_BigInt *response_buf = NULL; HYPRE_Int *response_buf_starts=NULL; HYPRE_Int *num_rows_per_proc = NULL, *num_elements_total = NULL; HYPRE_Int *argsort_contact_procs = NULL; HYPRE_Int obj_size_bytes, complex_size; HYPRE_BigInt big_int_size; HYPRE_Int tmp_int; HYPRE_BigInt tmp_big_int; HYPRE_BigInt *col_ptr; HYPRE_BigInt *big_int_data = NULL; HYPRE_Int big_int_data_size = 0, complex_data_size = 0; void *void_contact_buf = NULL; void *index_ptr; void *recv_data_ptr; HYPRE_Complex tmp_complex; HYPRE_Complex *col_data_ptr; HYPRE_Complex *complex_data = NULL; hypre_DataExchangeResponse response_obj1, response_obj2; hypre_ProcListElements send_proc_obj; hypre_IJAssumedPart *apart; hypre_MPI_Comm_rank(comm, &myid); global_num_cols = 
hypre_IJMatrixGlobalNumCols(matrix); global_first_col = hypre_IJMatrixGlobalFirstCol(matrix); global_first_row = hypre_IJMatrixGlobalFirstRow(matrix); if (memory_location == HYPRE_MEMORY_DEVICE) { HYPRE_BigInt *tmp = hypre_TAlloc(HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST); HYPRE_BigInt *off_proc_i_h = hypre_TAlloc(HYPRE_BigInt, 2*current_num_elmts, HYPRE_MEMORY_HOST); HYPRE_BigInt *off_proc_j_h = hypre_TAlloc(HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST); HYPRE_Complex *off_proc_data_h = hypre_TAlloc(HYPRE_Complex, current_num_elmts, HYPRE_MEMORY_HOST); hypre_TMemcpy(tmp, off_proc_i, HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(off_proc_j_h, off_proc_j, HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(off_proc_data_h, off_proc_data, HYPRE_Complex, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); for (i = 0; i < current_num_elmts; i++) { #if defined(HYPRE_DEBUG) hypre_assert(tmp[i] < hypre_IJMatrixRowPartitioning(matrix)[0] || tmp[i] >= hypre_IJMatrixRowPartitioning(matrix)[1]); hypre_assert(tmp[i] >= global_first_row && tmp[i] < global_first_row + hypre_IJMatrixGlobalNumRows(matrix)); hypre_assert(off_proc_j_h[i] >= global_first_col && off_proc_j_h[i] < global_first_col + global_num_cols); #endif off_proc_i_h[2*i] = tmp[i]; off_proc_i_h[2*i+1] = 1; } off_proc_i_indx = current_num_elmts * 2; off_proc_i = off_proc_i_h; off_proc_j = off_proc_j_h; off_proc_data = off_proc_data_h; hypre_TFree(tmp, HYPRE_MEMORY_HOST); } /* call hypre_IJMatrixAddToValuesParCSR directly inside this function * with one chunk of data */ HYPRE_Int off_proc_nelm_recv_cur = 0; HYPRE_Int off_proc_nelm_recv_max = 0; HYPRE_BigInt *off_proc_i_recv = NULL; HYPRE_BigInt *off_proc_j_recv = NULL; HYPRE_Complex *off_proc_data_recv = NULL; HYPRE_BigInt *off_proc_i_recv_d = NULL; HYPRE_BigInt *off_proc_j_recv_d = NULL; HYPRE_Complex *off_proc_data_recv_d = NULL; num_rows = off_proc_i_indx/2; /* 
verify that we have created the assumed partition */ if (hypre_IJMatrixAssumedPart(matrix) == NULL) { hypre_IJMatrixCreateAssumedPartition(matrix); } apart = (hypre_IJAssumedPart*) hypre_IJMatrixAssumedPart(matrix); /*if (hypre_ParCSRMatrixAssumedPartition(par_matrix) == NULL) { hypre_ParCSRMatrixCreateAssumedPartition(par_matrix); } apart = hypre_ParCSRMatrixAssumedPartition(par_matrix);*/ row_list = hypre_CTAlloc(HYPRE_BigInt, num_rows, HYPRE_MEMORY_HOST); row_list_num_elements = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST); a_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST); orig_order = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST); real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST); /* get the assumed processor id for each row */ if (num_rows > 0 ) { for (i=0; i < num_rows; i++) { row = off_proc_i[i*2]; //if (row < 0) row = -row - 1; row_list[i] = row; row_list_num_elements[i] = off_proc_i[i*2+1]; hypre_GetAssumedPartitionProcFromRow(comm, row, global_first_row, global_num_cols, &proc_id); a_proc_id[i] = proc_id; orig_order[i] = i; } /* now we need to find the actual order of each row - sort on row - this will result in proc ids sorted also...*/ hypre_BigQsortb2i(row_list, a_proc_id, orig_order, 0, num_rows -1); /* calculate the number of contacts */ ex_num_contacts = 1; last_proc = a_proc_id[0]; for (i=1; i < num_rows; i++) { if (a_proc_id[i] > last_proc) { ex_num_contacts++; last_proc = a_proc_id[i]; } } } /* now we will go through a create a contact list - need to contact assumed processors and find out who the actual row owner is - we will contact with a range (2 numbers) */ ex_contact_procs = hypre_CTAlloc(HYPRE_Int, ex_num_contacts, HYPRE_MEMORY_HOST); ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, ex_num_contacts+1, HYPRE_MEMORY_HOST); ex_contact_buf = hypre_CTAlloc(HYPRE_BigInt, ex_num_contacts*2, HYPRE_MEMORY_HOST); counter = 0; range_end = -1; for (i=0; i< num_rows; i++) { if (row_list[i] > 
range_end) { /* assumed proc */ proc_id = a_proc_id[i]; /* end of prev. range */ if (counter > 0) { ex_contact_buf[counter*2 - 1] = row_list[i-1]; } /*start new range*/ ex_contact_procs[counter] = proc_id; ex_contact_vec_starts[counter] = counter*2; ex_contact_buf[counter*2] = row_list[i]; counter++; hypre_GetAssumedPartitionRowRange(comm, proc_id, global_first_col, global_num_cols, &range_start, &range_end); } } /* finish the starts */ ex_contact_vec_starts[counter] = counter*2; /* finish the last range */ if (counter > 0) { ex_contact_buf[counter*2 - 1] = row_list[num_rows - 1]; } /* don't allocate space for responses */ /* create response object - can use same fill response as used in the commpkg routine */ response_obj1.fill_response = hypre_RangeFillResponseIJDetermineRecvProcs; response_obj1.data1 = apart; /* this is necessary so we can fill responses*/ response_obj1.data2 = NULL; max_response_size = 6; /* 6 means we can fit 3 ranges*/ hypre_DataExchangeList(ex_num_contacts, ex_contact_procs, ex_contact_buf, ex_contact_vec_starts, sizeof(HYPRE_BigInt), sizeof(HYPRE_BigInt), &response_obj1, max_response_size, 1, comm, (void**) &response_buf, &response_buf_starts); /* now response_buf contains a proc_id followed by a range upper bound */ hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST); hypre_TFree(ex_contact_buf, HYPRE_MEMORY_HOST); hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(a_proc_id, HYPRE_MEMORY_HOST); /*how many ranges were returned?*/ num_ranges = response_buf_starts[ex_num_contacts]; num_ranges = num_ranges/2; prev_id = -1; j = 0; counter = 0; num_real_procs = 0; /* loop through ranges - create a list of actual processor ids*/ for (i=0; i<num_ranges; i++) { upper_bound = response_buf[i*2+1]; counter = 0; tmp_id = response_buf[i*2]; /* loop through row_list entries - counting how many are in the range */ while (j < num_rows && row_list[j] <= upper_bound) { real_proc_id[j] = tmp_id; j++; counter++; } if (counter > 0 && tmp_id != 
prev_id) { num_real_procs++; } prev_id = tmp_id; } /* now we have the list of real processor ids (real_proc_id) - and the number of distinct ones - so now we can set up data to be sent - we have HYPRE_Int data and HYPRE_Complex data. that we will need to pack together */ /* first find out how many rows and elements we need to send per proc - so we can do storage */ ex_contact_procs = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST); num_rows_per_proc = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST); num_elements_total = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST); counter = 0; if (num_real_procs > 0 ) { ex_contact_procs[0] = real_proc_id[0]; num_rows_per_proc[0] = 1; num_elements_total[0] = row_list_num_elements[orig_order[0]]; /* loop through real procs - these are sorted (row_list is sorted also)*/ for (i=1; i < num_rows; i++) { if (real_proc_id[i] == ex_contact_procs[counter]) /* same processor */ { num_rows_per_proc[counter] += 1; /*another row */ num_elements_total[counter] += row_list_num_elements[orig_order[i]]; } else /* new processor */ { counter++; ex_contact_procs[counter] = real_proc_id[i]; num_rows_per_proc[counter] = 1; num_elements_total[counter] = row_list_num_elements[orig_order[i]]; } } } /* to pack together, we need to use the largest obj. size of (HYPRE_Int) and (HYPRE_Complex) - if these are much different, then we are wasting some storage, but I do not think that it will be a large amount since this function should not be used on really large amounts of data anyway*/ big_int_size = sizeof(HYPRE_BigInt); complex_size = sizeof(HYPRE_Complex); obj_size_bytes = hypre_max(big_int_size, complex_size); /* set up data to be sent to send procs */ /* for each proc, ex_contact_buf contains #rows, row #, no. elements, col indicies, col data, row #, no. elements, col indicies, col data, etc. 
*/ /* first calculate total storage and make vec_starts arrays */ storage = 0; ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, num_real_procs + 1, HYPRE_MEMORY_HOST); ex_contact_vec_starts[0] = -1; for (i=0; i < num_real_procs; i++) { storage += 1 + 2 * num_rows_per_proc[i] + 2* num_elements_total[i]; ex_contact_vec_starts[i+1] = -storage-1; /* need negative for next loop */ } hypre_TFree(num_elements_total, HYPRE_MEMORY_HOST); /*void_contact_buf = hypre_MAlloc(storage*obj_size_bytes);*/ void_contact_buf = hypre_CTAlloc(char, storage*obj_size_bytes, HYPRE_MEMORY_HOST); index_ptr = void_contact_buf; /* step through with this index */ /* for each proc: #rows, row #, no. elements, col indicies, col data, row #, no. elements, col indicies, col data, etc. */ /* un-sort real_proc_id - we want to access data arrays in order, so cheaper to do this*/ us_real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST); for (i=0; i < num_rows; i++) { us_real_proc_id[orig_order[i]] = real_proc_id[i]; } hypre_TFree(real_proc_id, HYPRE_MEMORY_HOST); counter = 0; /* index into data arrays */ prev_id = -1; for (i=0; i < num_rows; i++) { proc_id = us_real_proc_id[i]; /* can't use row list[i] - you loose the negative signs that differentiate add/set values */ row = off_proc_i[i*2]; num_elements = row_list_num_elements[i]; /* find position of this processor */ indx = hypre_BinarySearch(ex_contact_procs, proc_id, num_real_procs); in_i = ex_contact_vec_starts[indx]; index_ptr = (void *) ((char *) void_contact_buf + in_i*obj_size_bytes); /* first time for this processor - add the number of rows to the buffer */ if (in_i < 0) { in_i = -in_i - 1; /* re-calc. 
index_ptr since in_i was negative */ index_ptr = (void *) ((char *) void_contact_buf + in_i*obj_size_bytes); tmp_int = num_rows_per_proc[indx]; hypre_TMemcpy( index_ptr, &tmp_int, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); index_ptr = (void *) ((char *) index_ptr + obj_size_bytes); in_i++; } /* add row # */ hypre_TMemcpy( index_ptr, &row, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); index_ptr = (void *) ((char *) index_ptr + obj_size_bytes); in_i++; /* add number of elements */ hypre_TMemcpy( index_ptr, &num_elements, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); index_ptr = (void *) ((char *) index_ptr + obj_size_bytes); in_i++; /* now add col indices */ for (j=0; j< num_elements; j++) { tmp_big_int = off_proc_j[counter+j]; /* col number */ hypre_TMemcpy( index_ptr, &tmp_big_int, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); index_ptr = (void *) ((char *) index_ptr + obj_size_bytes); in_i ++; } /* now add data */ for (j=0; j< num_elements; j++) { tmp_complex = off_proc_data[counter++]; /* value */ hypre_TMemcpy( index_ptr, &tmp_complex, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); index_ptr = (void *) ((char *) index_ptr + obj_size_bytes); in_i++; } /* increment the indexes to keep track of where we are - we * adjust below to be actual starts*/ ex_contact_vec_starts[indx] = in_i; } /* some clean up */ hypre_TFree(response_buf, HYPRE_MEMORY_HOST); hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST); hypre_TFree(us_real_proc_id, HYPRE_MEMORY_HOST); hypre_TFree(orig_order, HYPRE_MEMORY_HOST); hypre_TFree(row_list, HYPRE_MEMORY_HOST); hypre_TFree(row_list_num_elements, HYPRE_MEMORY_HOST); hypre_TFree(num_rows_per_proc, HYPRE_MEMORY_HOST); for (i=num_real_procs; i > 0; i--) { ex_contact_vec_starts[i] = ex_contact_vec_starts[i-1]; } ex_contact_vec_starts[0] = 0; /* now send the data */ /***********************************/ /* first get the integer info in send_proc_obj */ /* the response we expect is just a 
confirmation*/ response_buf = NULL; response_buf_starts = NULL; /*build the response object*/ /* use the send_proc_obj for the info kept from contacts */ /*estimate inital storage allocation */ send_proc_obj.length = 0; send_proc_obj.storage_length = num_real_procs + 5; send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts[0] = 0; send_proc_obj.element_storage_length = storage + 20; send_proc_obj.v_elements = hypre_TAlloc(char, obj_size_bytes*send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST); response_obj2.fill_response = hypre_FillResponseIJOffProcVals; response_obj2.data1 = NULL; response_obj2.data2 = &send_proc_obj; max_response_size = 0; hypre_DataExchangeList(num_real_procs, ex_contact_procs, void_contact_buf, ex_contact_vec_starts, obj_size_bytes, 0, &response_obj2, max_response_size, 2, comm, (void **) &response_buf, &response_buf_starts); hypre_TFree(response_buf, HYPRE_MEMORY_HOST); hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST); hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST); hypre_TFree(void_contact_buf, HYPRE_MEMORY_HOST); hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST); /* Now we can unpack the send_proc_objects and call set and add to values functions. 
We unpack messages in a deterministic order, using processor rank */ num_recvs = send_proc_obj.length; argsort_contact_procs = hypre_CTAlloc(HYPRE_Int, num_recvs, HYPRE_MEMORY_HOST); for(i=0; i < num_recvs; i++) { argsort_contact_procs[i] = i; } /* This sort's the id array, but the original indices are stored in * argsort_contact_procs */ hypre_qsort2i( send_proc_obj.id, argsort_contact_procs, 0, num_recvs-1 ); /* alias */ recv_data_ptr = send_proc_obj.v_elements; recv_starts = send_proc_obj.vec_starts; for (i=0; i < num_recvs; i++) { /* Find the current processor in order, and reset recv_data_ptr to that processor's message */ original_proc_indx = argsort_contact_procs[i]; /*current_proc = send_proc_obj.id[i];*/ indx = recv_starts[original_proc_indx]; recv_data_ptr = (void *) ((char *) send_proc_obj.v_elements + indx*obj_size_bytes); /* get the number of rows for this recv */ hypre_TMemcpy( &num_rows, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes); indx++; for (j=0; j < num_rows; j++) /* for each row: unpack info */ { /* row # */ hypre_TMemcpy( &row, recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes); indx++; /* num elements for this row */ hypre_TMemcpy( &num_elements, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes); indx++; /* col indices */ /* Need to check this again !!!! 
*/ if (big_int_size == obj_size_bytes) { col_ptr = (HYPRE_BigInt *) recv_data_ptr; recv_data_ptr = (void *) ((char *)recv_data_ptr + num_elements*obj_size_bytes); } else /* copy data */ { if (big_int_data_size < num_elements) { big_int_data = hypre_TReAlloc(big_int_data, HYPRE_BigInt, num_elements + 10, HYPRE_MEMORY_HOST); } for (k=0; k< num_elements; k++) { hypre_TMemcpy( &big_int_data[k], recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes); } col_ptr = big_int_data; } /* col data */ if (complex_size == obj_size_bytes) { col_data_ptr = (HYPRE_Complex *) recv_data_ptr; recv_data_ptr = (void *) ((char *)recv_data_ptr + num_elements*obj_size_bytes); } else /* copy data */ { if (complex_data_size < num_elements) { complex_data = hypre_TReAlloc(complex_data, HYPRE_Complex, num_elements + 10, HYPRE_MEMORY_HOST); } for (k=0; k< num_elements; k++) { hypre_TMemcpy( &complex_data[k], recv_data_ptr, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes); } col_data_ptr = complex_data; } if (memory_location == HYPRE_MEMORY_HOST) { hypre_IJMatrixAddToValuesParCSR(matrix, 1, &num_elements, &row, &row_index, col_ptr, col_data_ptr); } else { HYPRE_Int nelm_new = off_proc_nelm_recv_cur + num_elements; if (nelm_new > off_proc_nelm_recv_max) { off_proc_nelm_recv_max = nelm_new * 2; off_proc_i_recv = hypre_TReAlloc(off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST); off_proc_j_recv = hypre_TReAlloc(off_proc_j_recv, HYPRE_BigInt, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST); off_proc_data_recv = hypre_TReAlloc(off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST); } HYPRE_Int i; for (i = 0; i < num_elements; i++) { off_proc_i_recv[off_proc_nelm_recv_cur + i] = row; } hypre_TMemcpy(off_proc_j_recv + off_proc_nelm_recv_cur, col_ptr, HYPRE_BigInt, num_elements, HYPRE_MEMORY_HOST, 
HYPRE_MEMORY_HOST); hypre_TMemcpy(off_proc_data_recv + off_proc_nelm_recv_cur, col_data_ptr, HYPRE_Complex, num_elements, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); off_proc_nelm_recv_cur = nelm_new; } indx += (num_elements*2); } } if (memory_location == HYPRE_MEMORY_DEVICE) { off_proc_i_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE); off_proc_j_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE); off_proc_data_recv_d = hypre_TAlloc(HYPRE_Complex, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(off_proc_i_recv_d, off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_TMemcpy(off_proc_j_recv_d, off_proc_j_recv, HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_TMemcpy(off_proc_data_recv_d, off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_IJMatrixSetAddValuesParCSRDevice(matrix, off_proc_nelm_recv_cur, NULL, off_proc_i_recv_d, NULL, off_proc_j_recv_d, off_proc_data_recv_d, "add"); #endif } hypre_TFree(send_proc_obj.v_elements, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST); hypre_TFree(argsort_contact_procs, HYPRE_MEMORY_HOST); if (big_int_data) { hypre_TFree(big_int_data, HYPRE_MEMORY_HOST); } if (complex_data) { hypre_TFree(complex_data, HYPRE_MEMORY_HOST); } if (memory_location == HYPRE_MEMORY_DEVICE) { hypre_TFree(off_proc_i, HYPRE_MEMORY_HOST); hypre_TFree(off_proc_j, HYPRE_MEMORY_HOST); hypre_TFree(off_proc_data, HYPRE_MEMORY_HOST); } hypre_TFree(off_proc_i_recv, HYPRE_MEMORY_HOST); hypre_TFree(off_proc_j_recv, HYPRE_MEMORY_HOST); hypre_TFree(off_proc_data_recv, HYPRE_MEMORY_HOST); hypre_TFree(off_proc_i_recv_d, HYPRE_MEMORY_DEVICE); hypre_TFree(off_proc_j_recv_d, HYPRE_MEMORY_DEVICE); hypre_TFree(off_proc_data_recv_d, 
HYPRE_MEMORY_DEVICE); return hypre_error_flag; }

/*--------------------------------------------------------------------
 * hypre_FillResponseIJOffProcVals
 * Fill response function for the previous function (2nd data exchange)
 *
 * Appends the packed contact buffer received from contact_proc onto the
 * accumulating send_proc_obj (passed in via response_obj->data2), growing
 * its id / vec_starts / v_elements arrays on demand.  Each packed entry
 * occupies one fixed-size slot (max of HYPRE_BigInt and HYPRE_Complex).
 * No response message is returned (*response_message_size = 0): this
 * exchange is confirmation-only.
 *--------------------------------------------------------------------*/

HYPRE_Int
hypre_FillResponseIJOffProcVals(void      *p_recv_contact_buf,
                                HYPRE_Int  contact_size,
                                HYPRE_Int  contact_proc,
                                void      *ro,
                                MPI_Comm   comm,
                                void     **p_send_response_buf,
                                HYPRE_Int *response_message_size )
{
   HYPRE_Int    myid;
   HYPRE_Int    index, count, elength;

   HYPRE_Int object_size;
   void     *index_ptr;

   hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse*) ro;
   hypre_ProcListElements     *send_proc_obj = (hypre_ProcListElements*) response_obj->data2;

   /* slot size: big enough to hold either a column index or a value */
   object_size = hypre_max(sizeof(HYPRE_BigInt), sizeof(HYPRE_Complex));

   hypre_MPI_Comm_rank(comm, &myid );

   /* check to see if we need to allocate more space in send_proc_obj for
      vec starts and id */
   if (send_proc_obj->length == send_proc_obj->storage_length)
   {
      send_proc_obj->storage_length += 20; /* add space for 20 more contacts */
      send_proc_obj->vec_starts =
         hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int,
                        send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
      if ( send_proc_obj->id != NULL)
      {
         send_proc_obj->id =
            hypre_TReAlloc(send_proc_obj->id, HYPRE_Int,
                           send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
      }
   }

   /* initialize */
   count = send_proc_obj->length;
   index = send_proc_obj->vec_starts[count]; /* current number of elements */
   if ( send_proc_obj->id != NULL)
   {
      send_proc_obj->id[count] = contact_proc;
   }

   /* do we need more storage for the elements? */
   if (send_proc_obj->element_storage_length < index + contact_size)
   {
      elength = hypre_max(contact_size, 100);
      elength += index;
      send_proc_obj->v_elements =
         hypre_TReAlloc((char*)send_proc_obj->v_elements, char,
                        elength*object_size, HYPRE_MEMORY_HOST);
      send_proc_obj->element_storage_length = elength;
   }

   /* populate send_proc_obj: raw byte copy of the whole contact buffer */
   index_ptr = (void *) ((char *) send_proc_obj->v_elements + index*object_size);
   hypre_TMemcpy(index_ptr, p_recv_contact_buf , char, object_size*contact_size,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);

   send_proc_obj->vec_starts[count+1] = index + contact_size;
   send_proc_obj->length++;

   /* output - no message to return (confirmation) */
   *response_message_size = 0;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------*/

/* Binary search for the owner of `value` in a processor partition array.
 * Returns p such that list[p] <= value < list[p+1], or -1 when value is
 * outside [list[0], list[list_length]).
 * NOTE(review): reads list[list_length], so `list` must contain
 * list_length + 1 entries — confirm against callers. */
HYPRE_Int hypre_FindProc(HYPRE_BigInt *list, HYPRE_BigInt value, HYPRE_Int list_length)
{
   HYPRE_Int low, high, m;

   low = 0;
   high = list_length;
   if (value >= list[high] || value < list[low])
   {
      return -1;
   }
   else
   {
      /* invariant: list[low] <= value < list[high] */
      while (low+1 < high)
      {
         m = (low + high) / 2;
         if (value < list[m])
         {
            high = m;
         }
         else if (value >= list[m])
         {
            low = m;
         }
      }
      return low;
   }
}

/******************************************************************************
 *
 * hypre_IJMatrixAssembleParCSR
 *
 * assembles IJMatrix from AuxParCSRMatrix auxiliary structure
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixAssembleParCSR(hypre_IJMatrix *matrix)
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);

   hypre_ParCSRMatrix    *par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
   HYPRE_BigInt          *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt          *col_partitioning = hypre_IJMatrixColPartitioning(matrix);

   hypre_CSRMatrix *diag   = hypre_ParCSRMatrixDiag(par_matrix);
   hypre_CSRMatrix *offd   = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Int       *diag_i = hypre_CSRMatrixI(diag);
   HYPRE_Int       *offd_i = hypre_CSRMatrixI(offd);

   HYPRE_Int     *diag_j;
   HYPRE_Int     *offd_j = NULL;
   HYPRE_Complex *diag_data;
   HYPRE_Complex *offd_data = NULL;

   HYPRE_Int      i, j, j0;
   HYPRE_Int      num_cols_offd;
   HYPRE_Int     *diag_pos;
   HYPRE_BigInt  *col_map_offd;
   HYPRE_Int     *rownnz;
   HYPRE_Int     *row_length;
   HYPRE_BigInt **aux_j;
   HYPRE_Complex **aux_data;
   HYPRE_Int      my_id, num_procs;
   HYPRE_Int
num_rows;
   HYPRE_Int      num_rownnz;
   HYPRE_Int      i_diag, i_offd;
   HYPRE_BigInt   col_0, col_n;
   HYPRE_Int      nnz_offd;
   HYPRE_BigInt  *big_offd_j;
   HYPRE_BigInt  *tmp_j;
   HYPRE_Complex  temp;
   HYPRE_BigInt   base = hypre_IJMatrixGlobalFirstCol(matrix);
   HYPRE_Int      off_proc_i_indx;
   HYPRE_Int      max_off_proc_elmts;
   HYPRE_Int      current_num_elmts;
   HYPRE_BigInt  *off_proc_i;
   HYPRE_BigInt  *off_proc_j;
   HYPRE_Complex *off_proc_data;
   HYPRE_Int      offd_proc_elmts;
   HYPRE_Int      max_num_threads;
   HYPRE_Int      aux_flag, aux_flag_global;

   HYPRE_ANNOTATE_FUNC_BEGIN;

   max_num_threads = hypre_NumThreads();

   /* first find out if anyone has an aux_matrix, and create one if you don't
    * have one, but other procs do (the Allreduce below is collective, so
    * every rank must participate in the off-proc exchange) */
   aux_flag = 0;
   aux_flag_global = 0;
   if (aux_matrix)
   {
      aux_flag = 1;
   }
   hypre_MPI_Allreduce(&aux_flag, &aux_flag_global, 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm);
   if (aux_flag_global && (!aux_flag))
   {
      hypre_MPI_Comm_rank(comm, &my_id);
      num_rows = (HYPRE_Int)(row_partitioning[my_id+1] - row_partitioning[my_id]);
      hypre_AuxParCSRMatrixCreate(&aux_matrix, num_rows, num_rows, NULL);
      hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }

   if (aux_matrix)
   {
      /* exchange/apply entries that were set for rows owned by other procs */
      off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
      hypre_MPI_Allreduce(&off_proc_i_indx, &offd_proc_elmts, 1, HYPRE_MPI_INT,
                          hypre_MPI_SUM, comm);
      if (offd_proc_elmts)
      {
         max_off_proc_elmts = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
         current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
         off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
         hypre_IJMatrixAssembleOffProcValsParCSR( matrix, off_proc_i_indx,
                                                  max_off_proc_elmts, current_num_elmts,
                                                  HYPRE_MEMORY_HOST,
                                                  off_proc_i, off_proc_j, off_proc_data);
      }
   }

   if (hypre_IJMatrixAssembleFlag(matrix) == 0)
   {
      hypre_MPI_Comm_size(comm, &num_procs);
      hypre_MPI_Comm_rank(comm, &my_id);
      num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
      col_0 = col_partitioning[0];
      col_n = col_partitioning[1]-1;

      /* move data into ParCSRMatrix if not there already */
      if (hypre_AuxParCSRMatrixNeedAux(aux_matrix))
      {
         HYPRE_Int *diag_array;
         HYPRE_Int *offd_array;

         /* Update nonzero rows of aux_matrix */
         hypre_AuxParCSRMatrixSetRownnz(aux_matrix);

         aux_j      = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
         aux_data   = hypre_AuxParCSRMatrixAuxData(aux_matrix);
         row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
         num_rownnz = hypre_AuxParCSRMatrixLocalNumRownnz(aux_matrix);
         rownnz     = hypre_AuxParCSRMatrixRownnz(aux_matrix);

         /* per-thread diag/offd counters used for the prefix sums below */
         diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
         offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
         diag_pos   = hypre_TAlloc(HYPRE_Int, num_rownnz, HYPRE_MEMORY_HOST);
         i_diag = i_offd = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i, j, i_diag, i_offd)
#endif
         {
            HYPRE_BigInt  *local_j;
            HYPRE_Complex *local_data;
            HYPRE_Int      ii, rest, size, ns, ne;
            HYPRE_Int      num_threads, my_thread_num;

            /* split the nonzero rows evenly over the threads */
            num_threads = hypre_NumActiveThreads();
            my_thread_num = hypre_GetThreadNum();
            size = num_rownnz/num_threads;
            rest = num_rownnz - size*num_threads;
            if (my_thread_num < rest)
            {
               ns = my_thread_num*(size + 1);
               ne = (my_thread_num+1)*(size + 1);
            }
            else
            {
               ns = my_thread_num*size + rest;
               ne = (my_thread_num+1)*size + rest;
            }

            /* pass 1: count diag/offd entries per thread; remember where the
             * diagonal element sits in each row (diag_pos) so it can be moved
             * to the front of the row in pass 2 */
            i_diag = i_offd = 0;
            for (i = ns; i < ne; i++)
            {
               ii = rownnz ? rownnz[i] : i;
               local_j = aux_j[ii];
               local_data = aux_data[ii];
               diag_pos[i] = -1;
               for (j = 0; j < row_length[ii]; j++)
               {
                  if (local_j[j] < col_0 || local_j[j] > col_n)
                  {
                     i_offd++;
                  }
                  else
                  {
                     i_diag++;
                     if ((HYPRE_Int)(local_j[j] - col_0) == i)
                     {
                        diag_pos[i] = j;
                     }
                  }
               }
            }
            diag_array[my_thread_num] = i_diag;
            offd_array[my_thread_num] = i_offd;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
            /* thread 0: prefix-sum the per-thread counts and (re)allocate the
             * CSR arrays at the now-known sizes */
            if (my_thread_num == 0)
            {
               i_diag = 0;
               i_offd = 0;
               for (i = 0; i < num_threads; i++)
               {
                  i_diag += diag_array[i];
                  i_offd += offd_array[i];
                  diag_array[i] = i_diag;
                  offd_array[i] = i_offd;
               }
               diag_i[num_rows] = i_diag;
               offd_i[num_rows] = i_offd;
               hypre_TFree(hypre_CSRMatrixJ(diag), hypre_CSRMatrixMemoryLocation(diag));
               hypre_TFree(hypre_CSRMatrixData(diag), hypre_CSRMatrixMemoryLocation(diag));
               hypre_TFree(hypre_CSRMatrixJ(offd), hypre_CSRMatrixMemoryLocation(offd));
               hypre_TFree(hypre_CSRMatrixData(offd), hypre_CSRMatrixMemoryLocation(offd));
               hypre_TFree(hypre_CSRMatrixBigJ(offd), hypre_CSRMatrixMemoryLocation(offd));
               diag_j     = hypre_CTAlloc(HYPRE_Int,     i_diag, hypre_CSRMatrixMemoryLocation(diag));
               diag_data  = hypre_CTAlloc(HYPRE_Complex, i_diag, hypre_CSRMatrixMemoryLocation(diag));
               offd_j     = hypre_CTAlloc(HYPRE_Int,     i_offd, hypre_CSRMatrixMemoryLocation(offd));
               offd_data  = hypre_CTAlloc(HYPRE_Complex, i_offd, hypre_CSRMatrixMemoryLocation(offd));
               big_offd_j = hypre_CTAlloc(HYPRE_BigInt,  i_offd, hypre_CSRMatrixMemoryLocation(offd));
            }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
            /* pass 2: each thread starts at its prefix-summed offsets and
             * copies its rows; the diagonal entry goes first in each row */
            if (my_thread_num)
            {
               i_diag = diag_array[my_thread_num-1];
               i_offd = offd_array[my_thread_num-1];
            }
            else
            {
               i_diag = 0;
               i_offd = 0;
            }
            for (i = ns; i < ne; i++)
            {
               ii = rownnz ? rownnz[i] : i;
               diag_i[ii] = i_diag;
               offd_i[ii] = i_offd;
               local_j = aux_j[ii];
               local_data = aux_data[ii];
               if (diag_pos[i] > -1)
               {
                  diag_j[i_diag] = (HYPRE_Int)(local_j[diag_pos[i]] - col_0);
                  diag_data[i_diag++] = local_data[diag_pos[i]];
               }
               for (j = 0; j < row_length[ii]; j++)
               {
                  if (local_j[j] < col_0 || local_j[j] > col_n)
                  {
                     big_offd_j[i_offd] = local_j[j];
                     offd_data[i_offd++] = local_data[j];
                  }
                  else if (j != diag_pos[i])
                  {
                     diag_j[i_diag] = (HYPRE_Int)(local_j[j] - col_0);
                     diag_data[i_diag++] = local_data[j];
                  }
               }
            }

            /* Correct diag_i and offd_i: rows absent from rownnz got no
             * offsets above, so fill them from the next nonzero row */
            if (rownnz != NULL)
            {
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
               for (i = ns; i < (ne-1); i++)
               {
                  for (ii = rownnz[i] + 1; ii < rownnz[i+1]; ii++)
                  {
                     diag_i[ii] = diag_i[rownnz[i+1]];
                     offd_i[ii] = offd_i[rownnz[i+1]];
                  }
               }

               if (my_thread_num < (num_threads - 1))
               {
                  for (ii = rownnz[ne-1] + 1; ii < rownnz[ne]; ii++)
                  {
                     diag_i[ii] = diag_i[rownnz[ne]];
                     offd_i[ii] = offd_i[rownnz[ne]];
                  }
               }
               else
               {
                  for (ii = rownnz[ne-1] + 1; ii < num_rows; ii++)
                  {
                     diag_i[ii] = diag_i[num_rows];
                     offd_i[ii] = offd_i[num_rows];
                  }
               }
            }
         } /* end parallel region */

         hypre_TFree(diag_array, HYPRE_MEMORY_HOST);
         hypre_TFree(offd_array, HYPRE_MEMORY_HOST);

         hypre_CSRMatrixJ(diag) = diag_j;
         hypre_CSRMatrixData(diag) = diag_data;
         hypre_CSRMatrixNumNonzeros(diag) = diag_i[num_rows];
         if (offd_i[num_rows] > 0)
         {
            hypre_CSRMatrixJ(offd) = offd_j;
            hypre_CSRMatrixBigJ(offd) = big_offd_j;
            hypre_CSRMatrixData(offd) = offd_data;
         }
         hypre_CSRMatrixNumNonzeros(offd) = offd_i[num_rows];
         hypre_TFree(diag_pos, HYPRE_MEMORY_HOST);
      }
      else
      {
         /* no aux structure: data is already in CSR form;
          * move diagonal element into first space of each diag row */
         big_offd_j = hypre_CSRMatrixBigJ(offd);
         diag_j = hypre_CSRMatrixJ(diag);
         diag_data = hypre_CSRMatrixData(diag);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private (i,j,j0,temp)
#endif
         for (i = 0; i < num_rows; i++)
         {
            j0 = diag_i[i];
            for (j = j0; j < diag_i[i+1]; j++)
            {
               if (diag_j[j] == i)
               {
                  /* swap the diagonal entry to position j0 */
                  temp = diag_data[j0];
                  diag_data[j0] = diag_data[j];
                  diag_data[j] = temp;
                  diag_j[j] = diag_j[j0];
                  diag_j[j0] = i;
                  break;
               }
            }
         }

         offd_j = hypre_CSRMatrixJ(offd);
         if (!offd_j && offd_i[num_rows])
         {
            offd_j = hypre_CTAlloc(HYPRE_Int, offd_i[num_rows],
                                   hypre_CSRMatrixMemoryLocation(offd));
            hypre_CSRMatrixJ(offd) = offd_j;
         }
      }

      /* generate the nonzero rows inside offd and diag by calling */
      hypre_CSRMatrixSetRownnz(diag);
      hypre_CSRMatrixSetRownnz(offd);

      /* generate col_map_offd: sorted unique global column indices of offd,
       * then compress big_offd_j into local indices offd_j */
      nnz_offd = offd_i[num_rows];
      if (nnz_offd)
      {
         tmp_j = hypre_CTAlloc(HYPRE_BigInt, nnz_offd, HYPRE_MEMORY_HOST);
         for (i = 0; i < nnz_offd; i++)
         {
            tmp_j[i] = big_offd_j[i];
         }
         hypre_BigQsort0(tmp_j,0,nnz_offd-1);
         num_cols_offd = 1;
         for (i = 0; i < nnz_offd-1; i++)
         {
            if (tmp_j[i+1] > tmp_j[i])
            {
               tmp_j[num_cols_offd++] = tmp_j[i+1];
            }
         }
         col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
         for (i = 0; i < num_cols_offd; i++)
         {
            col_map_offd[i] = tmp_j[i];
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i)
#endif
         for (i = 0; i < nnz_offd; i++)
         {
            offd_j[i] = hypre_BigBinarySearch(col_map_offd,big_offd_j[i],num_cols_offd);
         }

         if (base)
         {
            /* shift to zero-based global column indices */
            for (i = 0; i < num_cols_offd; i++)
            {
               col_map_offd[i] -= base;
            }
         }
         hypre_ParCSRMatrixColMapOffd(par_matrix) = col_map_offd;
         hypre_CSRMatrixNumCols(offd) = num_cols_offd;
         hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
         hypre_TFree(big_offd_j, hypre_CSRMatrixMemoryLocation(offd));
         hypre_CSRMatrixBigJ(offd) = NULL;
      }
      hypre_IJMatrixAssembleFlag(matrix) = 1;
   }

   /* the auxiliary structure is no longer needed after assembly */
   hypre_AuxParCSRMatrixDestroy(aux_matrix);
   hypre_IJMatrixTranslator(matrix) = NULL;

   HYPRE_ANNOTATE_FUNC_END;

   return hypre_error_flag;
}

/******************************************************************************
 *
 * IJMatrix_ParCSR
interface *
 *****************************************************************************/

#include "_hypre_IJ_mv.h"
#include "../HYPRE.h"

/******************************************************************************
 *
 * hypre_IJMatrixSetValuesOMPParCSR
 *
 * sets values in an IJMatrix before assembly,
 * use of this routine requires that the values in rows are different from each
 * other, i.e rows[i] != rows[j] for i != j
 * to ensure accurate threading
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixSetValuesOMPParCSR( hypre_IJMatrix       *matrix,
                                  HYPRE_Int             nrows,
                                  HYPRE_Int            *ncols,
                                  const HYPRE_BigInt   *rows,
                                  const HYPRE_Int      *row_indexes,
                                  const HYPRE_BigInt   *cols,
                                  const HYPRE_Complex  *values )
{
   hypre_ParCSRMatrix    *par_matrix;
   hypre_CSRMatrix       *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt          *row_partitioning;
   HYPRE_BigInt          *col_partitioning;
   MPI_Comm               comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int              num_procs, my_id;
   HYPRE_BigInt           col_0, col_n, first;
   HYPRE_BigInt         **aux_j;
   HYPRE_Complex        **aux_data;
   HYPRE_Int             *row_length, *row_space;
   HYPRE_Int              need_aux;
   HYPRE_Int             *diag_i;
   HYPRE_Int             *diag_j;
   HYPRE_Complex         *diag_data;
   HYPRE_Int             *offd_i;
   HYPRE_Int             *offd_j;
   HYPRE_BigInt          *big_offd_j;
   HYPRE_Complex         *offd_data;
   HYPRE_Int              pstart;
   HYPRE_Int              print_level = hypre_IJMatrixPrintLevel(matrix);
   HYPRE_Int              error_flag = 0;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);

   /* [col_0, col_n] is this rank's owned (diag) column range */
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1]-1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   pstart = 0;
   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      if (print_level)
      {
         hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n");
      }
      return hypre_error_flag;
   }

   if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
   {
      /* only existing entries may be overwritten: look each (row, col)
       * up in the final CSR structure and flag anything missing */
      HYPRE_BigInt *col_map_offd;
      HYPRE_Int     num_cols_offd;

      diag = hypre_ParCSRMatrixDiag(par_matrix);
      diag_i = hypre_CSRMatrixI(diag);
      diag_j = hypre_CSRMatrixJ(diag);
      diag_data = hypre_CSRMatrixData(diag);
      offd = hypre_ParCSRMatrixOffd(par_matrix);
      offd_i = hypre_CSRMatrixI(offd);
      num_cols_offd = hypre_CSRMatrixNumCols(offd);
      if (num_cols_offd)
      {
         col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
         offd_j = hypre_CSRMatrixJ(offd);
         offd_data = hypre_CSRMatrixData(offd);
      }
      aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
      {
         HYPRE_Int    j_offd;
         HYPRE_Int    num_threads, my_thread_num;
         HYPRE_Int    len, rest, ns, ne;
         HYPRE_Int    pos_diag, pos_offd;
         HYPRE_Int    len_diag, len_offd;
         HYPRE_Int    row_local;
         HYPRE_Int    i, j, ii, n;
         HYPRE_BigInt row;
         HYPRE_Int    not_found, size, indx;

         /* split the input rows evenly over the threads; safe because the
          * caller guarantees all rows[] entries are distinct */
         num_threads = hypre_NumActiveThreads();
         my_thread_num = hypre_GetThreadNum();
         len = nrows/num_threads;
         rest = nrows - len*num_threads;
         if (my_thread_num < rest)
         {
            ns = my_thread_num*(len+1);
            ne = (my_thread_num+1)*(len+1);
         }
         else
         {
            ns = my_thread_num*len+rest;
            ne = (my_thread_num+1)*len+rest;
         }
         for (ii=ns; ii < ne; ii++)
         {
            row = rows[ii];
            n = ncols ? ncols[ii] : 1;
            if (n == 0) /* empty row */
            {
               continue;
            }
            indx = row_indexes[ii];

            /* processor owns the row */
            if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
            {
               row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
               /* compute local row number */
               size = diag_i[row_local+1] - diag_i[row_local]
                      + offd_i[row_local+1] - offd_i[row_local];

               if (n > size)
               {
                  hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                  error_flag++;
                  if (print_level)
                  {
                     hypre_printf (" row %b too long! \n", row);
                  }
                  break;
                  /*return hypre_error_flag; */
               }

               pos_diag = diag_i[row_local];
               pos_offd = offd_i[row_local];
               len_diag = diag_i[row_local+1];
               len_offd = offd_i[row_local+1];
               not_found = 1;

               for (i=0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                  /* insert into offd */
                  {
                     j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
                                                    num_cols_offd);
                     if (j_offd == -1)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag; */
                     }
                     for (j=pos_offd; j < len_offd; j++)
                     {
                        if (offd_j[j] == j_offd)
                        {
                           offd_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag;*/
                     }
                     not_found = 1;
                  }
                  /* diagonal element */
                  else if (cols[indx] == row)
                  {
                     /* after assembly the diagonal entry sits first in its row */
                     if (diag_j[pos_diag] != row_local)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag; */
                     }
                     diag_data[pos_diag] = values[indx];
                  }
                  else /* insert into diag */
                  {
                     for (j=pos_diag; j < len_diag; j++)
                     {
                        if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
                        {
                           diag_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag;*/
                     }
                  }
                  indx++;
               }
            }
            /* processor does not own the row */
         }
      } /*end parallel region */
   }
   else /* matrix not assembled */
   {
      aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      if (need_aux)
      {
         aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
         aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
      }
      else
      {
         /* no aux structure: write straight into the preallocated CSR arrays */
         diag = hypre_ParCSRMatrixDiag(par_matrix);
         diag_i = hypre_CSRMatrixI(diag);
         diag_j = hypre_CSRMatrixJ(diag);
         diag_data = hypre_CSRMatrixData(diag);
         offd = hypre_ParCSRMatrixOffd(par_matrix);
         offd_i = hypre_CSRMatrixI(offd);
         if (num_procs > 1)
         {
            offd_data = hypre_CSRMatrixData(offd);
            big_offd_j = hypre_CSRMatrixBigJ(offd);
            if (!big_offd_j)
            {
               big_offd_j = hypre_CTAlloc(HYPRE_BigInt,
                                          offd_i[hypre_CSRMatrixNumRows(offd)],
                                          hypre_CSRMatrixMemoryLocation(offd));
               hypre_CSRMatrixBigJ(offd) = big_offd_j;
            }
         }
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
      {
         HYPRE_Int      num_threads, my_thread_num;
         HYPRE_Int      len, rest, ns, ne;
         HYPRE_BigInt  *tmp_j = NULL;
         HYPRE_BigInt  *local_j = NULL;
         HYPRE_Complex *tmp_data = NULL;
         HYPRE_Complex *local_data = NULL;
         HYPRE_Int      tmp_indx;
         HYPRE_Int      row_local;
         HYPRE_Int      i, j, ii, n;
         HYPRE_BigInt   row;
         HYPRE_Int      not_found, size, indx;
         HYPRE_Int      old_size, space, cnt;

         num_threads = hypre_NumActiveThreads();
         my_thread_num = hypre_GetThreadNum();
         len = nrows/num_threads;
         rest = nrows - len*num_threads;
         if (my_thread_num < rest)
         {
            ns = my_thread_num*(len+1);
            ne = (my_thread_num+1)*(len+1);
         }
         else
         {
            ns = my_thread_num*len+rest;
            ne = (my_thread_num+1)*len+rest;
         }
         for (ii=ns; ii < ne; ii++)
         {
            row = rows[ii];
            n = ncols ? ncols[ii] : 1;
            if (n == 0) /* empty row */
            {
               continue;
            }
            indx = row_indexes[ii];

            /* processor owns the row */
            if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
            {
               row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
               /* compute local row number */
               if (need_aux)
               {
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
                  space = row_space[row_local];
                  old_size = row_length[row_local];
                  size = space - old_size;
                  /* overflow values that don't fit in the current row space go
                   * into tmp arrays and are merged after reallocation below */
                  if (size < n)
                  {
                     size = n - size;
                     tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
                     tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
                  }
                  tmp_indx = 0;
                  not_found = 1;
                  size = old_size;
                  for (i=0; i < n; i++)
                  {
                     /* overwrite an existing entry for this column if present */
                     for (j=0; j < old_size; j++)
                     {
                        if (local_j[j] == cols[indx])
                        {
                           local_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (size < space)
                        {
                           local_j[size] = cols[indx];
                           local_data[size++] = values[indx];
                        }
                        else
                        {
                           tmp_j[tmp_indx] = cols[indx];
                           tmp_data[tmp_indx++] = values[indx];
                        }
                     }
                     not_found = 1;
                     indx++;
                  }

                  row_length[row_local] = size+tmp_indx;

                  if (tmp_indx)
                  {
                     aux_j[row_local] = hypre_TReAlloc(aux_j[row_local],HYPRE_BigInt,
                                                       size+tmp_indx, HYPRE_MEMORY_HOST);
                     aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
                                                          HYPRE_Complex, size+tmp_indx,
                                                          HYPRE_MEMORY_HOST);
                     row_space[row_local] = size+tmp_indx;
                     local_j = aux_j[row_local];
                     local_data = aux_data[row_local];
                  }

                  cnt = size;
                  for (i=0; i < tmp_indx; i++)
                  {
                     local_j[cnt] = tmp_j[i];
                     local_data[cnt++] = tmp_data[i];
                  }

                  if (tmp_j)
                  {
                     hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
                     hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
                  }
               }
               else /* insert immediately into data in ParCSRMatrix structure */
               {
                  HYPRE_Int offd_indx, diag_indx;
                  HYPRE_Int offd_space, diag_space;
                  HYPRE_Int cnt_diag, cnt_offd;

                  offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
                  diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
                  cnt_diag = diag_indx;
                  cnt_offd = offd_indx;
                  diag_space = diag_i[row_local+1];
                  offd_space = offd_i[row_local+1];
                  not_found = 1;
                  for (i=0; i < n; i++)
                  {
                     if (cols[indx] < col_0 || cols[indx] > col_n)
                     /* insert into offd */
                     {
                        for (j=offd_i[row_local]; j < offd_indx; j++)
                        {
                           if (big_offd_j[j] == cols[indx])
                           {
                              offd_data[j] = values[indx];
                              not_found = 0;
                              break;
                           }
                        }
                        if (not_found)
                        {
                           if (cnt_offd < offd_space)
                           {
                              big_offd_j[cnt_offd] = cols[indx];
                              offd_data[cnt_offd++] = values[indx];
                           }
                           else
                           {
                              hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                              error_flag++;
                              if (print_level)
                              {
                                 hypre_printf("Error in row %b ! Too many elements!\n",
                                              row);
                              }
                              break;
                              /*return hypre_error_flag;*/
                           }
                        }
                        not_found = 1;
                     }
                     else /* insert into diag */
                     {
                        for (j=diag_i[row_local]; j < diag_indx; j++)
                        {
                           if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
                           {
                              diag_data[j] = values[indx];
                              not_found = 0;
                              break;
                           }
                        }
                        if (not_found)
                        {
                           if (cnt_diag < diag_space)
                           {
                              diag_j[cnt_diag] = (HYPRE_Int)(cols[indx]-col_0);
                              diag_data[cnt_diag++] = values[indx];
                           }
                           else
                           {
                              hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                              error_flag++;
                              if (print_level)
                              {
                                 hypre_printf("Error in row %b ! Too many elements !\n",
                                              row);
                              }
                              break;
                              /*return hypre_error_flag;*/
                           }
                        }
                        not_found = 1;
                     }
                     indx++;
                  }
                  hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
                  hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
               }
            }
            /* processor does not own the row */
         }
      } /* end parallel region */
   }

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixAddToValuesOMPParCSR
 *
 * adds row values to an IJMatrix
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixAddToValuesOMPParCSR( hypre_IJMatrix       *matrix,
                                    HYPRE_Int             nrows,
                                    HYPRE_Int            *ncols,
                                    const HYPRE_BigInt   *rows,
                                    const HYPRE_Int      *row_indexes,
                                    const HYPRE_BigInt   *cols,
                                    const HYPRE_Complex  *values )
{
   hypre_ParCSRMatrix    *par_matrix;
   hypre_CSRMatrix       *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt          *row_partitioning;
   HYPRE_BigInt          *col_partitioning;
   MPI_Comm               comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int              num_procs, my_id;
   HYPRE_BigInt           col_0, col_n, first;
   HYPRE_BigInt         **aux_j;
   HYPRE_Complex        **aux_data;
   HYPRE_Int             *row_length, *row_space;
   HYPRE_Int              need_aux;
   HYPRE_Int              pstart;
   HYPRE_Int
*diag_i; HYPRE_Int *diag_j; HYPRE_Complex *diag_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_BigInt *big_offd_j; HYPRE_Complex *offd_data; HYPRE_Int current_num_elmts; HYPRE_Int max_off_proc_elmts; HYPRE_Int off_proc_i_indx; HYPRE_BigInt *off_proc_i; HYPRE_BigInt *off_proc_j; HYPRE_Complex *off_proc_data; HYPRE_Int **offproc_cnt; HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix); HYPRE_Int max_num_threads; HYPRE_Int error_flag = 0; HYPRE_Int i1; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); max_num_threads = hypre_NumThreads(); par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject( matrix ); row_partitioning = hypre_IJMatrixRowPartitioning(matrix); col_partitioning = hypre_IJMatrixColPartitioning(matrix); offproc_cnt = hypre_CTAlloc(HYPRE_Int *, max_num_threads, HYPRE_MEMORY_HOST); for (i1=0; i1 < max_num_threads; i1++) offproc_cnt[i1] = NULL; col_0 = col_partitioning[0]; col_n = col_partitioning[1]-1; first = hypre_IJMatrixGlobalFirstCol(matrix); pstart = 0; if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled */ { HYPRE_Int num_cols_offd; HYPRE_BigInt *col_map_offd; diag = hypre_ParCSRMatrixDiag(par_matrix); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); diag_data = hypre_CSRMatrixData(diag); offd = hypre_ParCSRMatrixOffd(par_matrix); offd_i = hypre_CSRMatrixI(offd); num_cols_offd = hypre_CSRMatrixNumCols(offd); if (num_cols_offd) { col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix); offd_j = hypre_CSRMatrixJ(offd); offd_data = hypre_CSRMatrixData(offd); } aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix); if (aux_matrix) { current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix); off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix); off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix); off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix); } #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int j_offd; HYPRE_Int 
num_threads, my_thread_num; HYPRE_Int len, rest, ns, ne; HYPRE_Int pos_diag, pos_offd; HYPRE_Int len_diag, len_offd; HYPRE_Int row_local; HYPRE_Int i, j, ii, n; HYPRE_BigInt row; HYPRE_Int not_found, size, indx; HYPRE_Int *my_offproc_cnt = NULL; num_threads = hypre_NumActiveThreads(); my_thread_num = hypre_GetThreadNum(); len = nrows/num_threads; rest = nrows - len*num_threads; if (my_thread_num < rest) { ns = my_thread_num*(len+1); ne = (my_thread_num+1)*(len+1); } else { ns = my_thread_num*len+rest; ne = (my_thread_num+1)*len+rest; } for (ii=ns; ii < ne; ii++) { row = rows[ii]; n = ncols ? ncols[ii] : 1; if (n == 0) /* empty row */ { continue; } indx = row_indexes[ii]; if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1]) { row_local = (HYPRE_Int)(row - row_partitioning[pstart]); /* compute local row number */ size = diag_i[row_local+1] - diag_i[row_local] + offd_i[row_local+1] - offd_i[row_local]; if (n > size) { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf (" row %b too long! 
\n", row); } break; /*return hypre_error_flag; */ } pos_diag = diag_i[row_local]; pos_offd = offd_i[row_local]; len_diag = diag_i[row_local+1]; len_offd = offd_i[row_local+1]; not_found = 1; for (i=0; i < n; i++) { if (cols[indx] < col_0 || cols[indx] > col_n) /* insert into offd */ { j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first, num_cols_offd); if (j_offd == -1) { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } break; /*return hypre_error_flag;*/ } for (j=pos_offd; j < len_offd; j++) { if (offd_j[j] == j_offd) { offd_data[j] += values[indx]; not_found = 0; break; } } if (not_found) { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } break; /*return hypre_error_flag;*/ } not_found = 1; } /* diagonal element */ else if (cols[indx] == row) { if (diag_j[pos_diag] != row_local) { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } break; /*return hypre_error_flag;*/ } diag_data[pos_diag] += values[indx]; } else /* insert into diag */ { for (j=pos_diag; j < len_diag; j++) { if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0)) { diag_data[j] += values[indx]; not_found = 0; break; } } if (not_found) { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } break; /*return hypre_error_flag;*/ } } indx++; } } /* not my row */ /* need to find solution for threaded version!!!! */ /* could save row number and process later .... 
*/ else { if (!my_offproc_cnt) { my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST); offproc_cnt[my_thread_num] = my_offproc_cnt; my_offproc_cnt[0] = 200; my_offproc_cnt[1] = 2; } i = my_offproc_cnt[1]; if (i+2 < my_offproc_cnt[0]) { my_offproc_cnt[i] = ii; my_offproc_cnt[i+1] = indx; my_offproc_cnt[1] += 2; } else { size = my_offproc_cnt[0]; my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int, size+200, HYPRE_MEMORY_HOST); my_offproc_cnt[0] += 200; my_offproc_cnt[i] = ii; my_offproc_cnt[i+1] = indx; my_offproc_cnt[1] += 2; } } } } /* end parallel region */ } /* not assembled */ else { aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix); if (aux_matrix) { current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix); off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix); off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix); off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix); } row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix); row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix); need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix); if (need_aux) { aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix); aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix); } else { diag = hypre_ParCSRMatrixDiag(par_matrix); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); diag_data = hypre_CSRMatrixData(diag); offd = hypre_ParCSRMatrixOffd(par_matrix); offd_i = hypre_CSRMatrixI(offd); if (num_procs > 1) { big_offd_j = hypre_CSRMatrixBigJ(offd); offd_data = hypre_CSRMatrixData(offd); if (!big_offd_j) { big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)], hypre_CSRMatrixMemoryLocation(offd)); hypre_CSRMatrixBigJ(offd) = big_offd_j; } } } #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int num_threads, my_thread_num; HYPRE_Int len, rest, ns, ne; HYPRE_BigInt *tmp_j = NULL; HYPRE_BigInt *local_j = NULL; HYPRE_Complex *tmp_data = NULL; HYPRE_Complex *local_data = NULL; 
HYPRE_Int tmp_indx; HYPRE_Int row_local; HYPRE_BigInt row; HYPRE_Int i, j, ii, n; HYPRE_Int not_found, size, indx; HYPRE_Int old_size, space, cnt; HYPRE_Int *my_offproc_cnt = NULL; num_threads = hypre_NumActiveThreads(); my_thread_num = hypre_GetThreadNum(); len = nrows/num_threads; rest = nrows - len*num_threads; if (my_thread_num < rest) { ns = my_thread_num*(len+1); ne = (my_thread_num+1)*(len+1); } else { ns = my_thread_num*len+rest; ne = (my_thread_num+1)*len+rest; } for (ii=ns; ii < ne; ii++) { row = rows[ii]; n = ncols ? ncols[ii] : 1; if (n == 0) /* empty row */ { continue; } indx = row_indexes[ii]; if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1]) { row_local = (HYPRE_Int)(row - row_partitioning[pstart]); /* compute local row number */ if (need_aux) { local_j = aux_j[row_local]; local_data = aux_data[row_local]; space = row_space[row_local]; old_size = row_length[row_local]; size = space - old_size; if (size < n) { size = n - size; tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST); tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST); } tmp_indx = 0; not_found = 1; size = old_size; for (i=0; i < n; i++) { for (j=0; j < old_size; j++) { if (local_j[j] == cols[indx]) { local_data[j] += values[indx]; not_found = 0; break; } } if (not_found) { if (size < space) { local_j[size] = cols[indx]; local_data[size++] = values[indx]; } else { tmp_j[tmp_indx] = cols[indx]; tmp_data[tmp_indx++] = values[indx]; } } not_found = 1; indx++; } row_length[row_local] = size+tmp_indx; if (tmp_indx) { aux_j[row_local] = hypre_TReAlloc(aux_j[row_local],HYPRE_BigInt, size+tmp_indx, HYPRE_MEMORY_HOST); aux_data[row_local] = hypre_TReAlloc(aux_data[row_local], HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST); row_space[row_local] = size+tmp_indx; local_j = aux_j[row_local]; local_data = aux_data[row_local]; } cnt = size; for (i=0; i < tmp_indx; i++) { local_j[cnt] = tmp_j[i]; local_data[cnt++] = tmp_data[i]; } if (tmp_j) { 
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST); hypre_TFree(tmp_data, HYPRE_MEMORY_HOST); } } else /* insert immediately into data in ParCSRMatrix structure */ { HYPRE_Int offd_indx, diag_indx; HYPRE_Int offd_space, diag_space; HYPRE_Int cnt_diag, cnt_offd; offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local]; diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local]; cnt_diag = diag_indx; cnt_offd = offd_indx; diag_space = diag_i[row_local+1]; offd_space = offd_i[row_local+1]; not_found = 1; for (i=0; i < n; i++) { if (cols[indx] < col_0 || cols[indx] > col_n) /* insert into offd */ { for (j=offd_i[row_local]; j < offd_indx; j++) { if (big_offd_j[j] == cols[indx]) { offd_data[j] += values[indx]; not_found = 0; break; } } if (not_found) { if (cnt_offd < offd_space) { big_offd_j[cnt_offd] = cols[indx]; offd_data[cnt_offd++] = values[indx]; } else { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf("Error in row %b ! Too many elements!\n", row); } break; /*return hypre_error_flag;*/ } } not_found = 1; } else /* insert into diag */ { for (j=diag_i[row_local]; j < diag_indx; j++) { if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0)) { diag_data[j] += values[indx]; not_found = 0; break; } } if (not_found) { if (cnt_diag < diag_space) { diag_j[cnt_diag] = (HYPRE_Int)(cols[indx]-col_0); diag_data[cnt_diag++] = values[indx]; } else { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf("Error in row %b ! 
Too many elements !\n", row); } break; /*return hypre_error_flag;*/ } } not_found = 1; } indx++; } hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag; hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd; } } /* not my row */ else { if (!my_offproc_cnt) { my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST); offproc_cnt[my_thread_num] = my_offproc_cnt; my_offproc_cnt[0] = 200; my_offproc_cnt[1] = 2; } i = my_offproc_cnt[1]; if (i+2 < my_offproc_cnt[0]) { my_offproc_cnt[i] = ii; my_offproc_cnt[i+1] = indx; my_offproc_cnt[1] += 2; } else { size = my_offproc_cnt[0]; my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int, size+200, HYPRE_MEMORY_HOST); my_offproc_cnt[0] += 200; my_offproc_cnt[i] = ii; my_offproc_cnt[i+1] = indx; my_offproc_cnt[1] += 2; } } } } /*end parallel region */ } if (error_flag) { return hypre_error_flag; } if (!aux_matrix) { HYPRE_Int size = (HYPRE_Int)(row_partitioning[pstart+1]-row_partitioning[pstart]); hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL); hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0; hypre_IJMatrixTranslator(matrix) = aux_matrix; } for (i1 = 0; i1 < max_num_threads; i1++) { if (offproc_cnt[i1]) { HYPRE_Int *my_offproc_cnt = offproc_cnt[i1]; HYPRE_Int i, i2, ii, n, indx; HYPRE_BigInt row; for (i2 = 2; i2 < my_offproc_cnt[1]; i2+=2) { ii = my_offproc_cnt[i2]; row = rows[ii]; n = ncols ? 
ncols[ii] : 1; if (n == 0) /* empty row */ { continue; } indx = my_offproc_cnt[i2+1]; current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix); max_off_proc_elmts = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix); off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix); off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix); off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix); off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix); if (!max_off_proc_elmts) { max_off_proc_elmts = hypre_max(n,1000); hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts; hypre_AuxParCSRMatrixOffProcI(aux_matrix) = hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST); hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST); hypre_AuxParCSRMatrixOffProcData(aux_matrix) = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST); off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix); off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix); off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix); } else if (current_num_elmts + n > max_off_proc_elmts) { max_off_proc_elmts += 3*n; off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST); off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST); off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST); hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts; hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i; hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j; hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data; } off_proc_i[off_proc_i_indx++] = row; off_proc_i[off_proc_i_indx++] = n; for (i=0; i < n; i++) { off_proc_j[current_num_elmts] = cols[indx]; off_proc_data[current_num_elmts++] = values[indx++]; } hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = 
off_proc_i_indx; hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts; } hypre_TFree(offproc_cnt[i1], HYPRE_MEMORY_HOST); } } hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST); return hypre_error_flag; }
crc32_fmt_plug.c
/*
 * This file is part of John the Ripper password cracker,
 *
 * Written by Jim Fougeron <jfoug at cox.net> in 2011. No copyright
 * is claimed, and the software is hereby placed in the public domain.
 * In case this attempt to disclaim copyright and place the software in the
 * public domain is deemed null and void, then the software is
 * Copyright (c) 2011 Jim Fougeron and it is hereby released to the
 * general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * There's ABSOLUTELY NO WARRANTY, express or implied.
 *
 * This format is: 8hex:8hex The first 8 hex is the 'starting' crc value
 * So, if you have a file and its CRC is XYZ, then you would put that value
 * here, then when the password(s) are found, append them to the file, and get
 * the final CRC value. If you want to find a password with the 'proper' CRC
 * value, then put 0 into the first field.
 *
 * The 2nd 8 hex value is what we are looking for.
 *
 * If you want alternate plaintexts, run with --keep-guessing option.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_crc32;
#elif FMT_REGISTERS_H
john_register_one(&fmt_crc32);
#else

/* Uncomment to try out a non-SSE4.2 build (bench with -cost=1:1) */
//#undef __SSE4_2__
//#undef __AVX2__

#include <string.h>

#include "common.h"
#include "formats.h"
#include "crc32.h"
#include "loader.h"

#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif

#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 256 // tuned on core i7
#endif
#endif

#include "memdbg.h"

#define FORMAT_LABEL        "CRC32"
#define FORMAT_NAME         ""
#define ALGORITHM_NAME      "CRC32 32/" ARCH_BITS_STR " CRC-32C " CRC32_C_ALGORITHM_NAME
#define BENCHMARK_COMMENT   ""
#define BENCHMARK_LENGTH    0
#define PLAINTEXT_LENGTH    31
#define BINARY_SIZE         4
#define BINARY_ALIGN        4
/* Salt is the 4-byte starting CRC plus one byte selecting CRC-32 vs CRC-32C. */
#define SALT_SIZE           5
#define SALT_ALIGN          4
#define MIN_KEYS_PER_CRYPT  1
#define MAX_KEYS_PER_CRYPT  8192 // per thread

static struct fmt_tests tests[] = {
	{"$crc32$00000000.fa455f6b", "ripper"},
	{"$crc32$00000000.4ff4f23f", "dummy"},
//	{"$crc32$00000000.00000000", ""}, // this one ends up skewing the benchmark time, WAY too much.
	{"$crc32$4ff4f23f.ce6eb863", "password"}, // this would be for file with contents: 'dummy' and we want to find a password to append that is 'password'
	{"$crc32$fa455f6b.c59b2aeb", "123456"}, // ripper123456
	{"$crc32c$00000000.98a61e94", "ripper"},
	{"$crc32c$00000000.d62b95de", "dummy"},
//	{"$crc32c$00000000.00000000", ""}, // this one ends up skewing the benchmark time, WAY too much.
	{"$crc32c$d62b95de.1439c9f9", "password"}, // this would be for file with contents: 'dummy' and we want to find a password to append that is 'password'
	{"$crc32c$98a61e94.77f23179", "123456"}, // ripper123456
	{NULL}
};

static struct fmt_main *pFmt;
/* One saved plaintext per candidate key slot. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* One computed CRC per candidate key slot (filled by crypt_all). */
static CRC32_t (*crcs);
/* Current salt: starting CRC value (already complemented by get_salt). */
static CRC32_t crcsalt;
/* 0 = CRC-32, 1 = CRC-32C (castagnoli); set from the salt's 5th byte. */
static unsigned int crctype;

/* Allocate per-candidate buffers; scales key count by thread count under OpenMP. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int n = omp_get_max_threads();
	if (n > 4) {
		n = 4; // it just won't scale further
		omp_set_num_threads(n);
	}
	self->params.max_keys_per_crypt *= (n*OMP_SCALE);
#endif
	//printf("Using %u x %u = %u keys per crypt\n", MAX_KEYS_PER_CRYPT, n, self->params.max_keys_per_crypt);
	saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
	crcs = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crcs));
	pFmt = self;
}

/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crcs);
	MEM_FREE(saved_key);
}

/* Accept "$crc32$XXXXXXXX.YYYYYYYY" or "$crc32c$XXXXXXXX.YYYYYYYY",
 * lowercase hex only. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q;
	int i;
	if (strncmp(ciphertext, "$crc32$", 7) && strncmp(ciphertext, "$crc32c$", 8))
		return 0;
	p = strrchr(ciphertext, '$');
	q = strchr(p, '.');
	if (!q || q-p != 9)
		return 0;
	for (i = 0; i < 8; ++i) {
		int c1 = ARCH_INDEX(p[1+i]);
		int c2 = ARCH_INDEX(p[10+i]);
		if (atoi16[c1] == 0x7F || atoi16[c2] == 0x7F)
			return 0;
		/* We don't support uppercase hex digits here, or else we'd need to implement
		 * split() and set FMT_SPLIT_UNIFIES_CASE. */
		if (c1 >= 'A' && c1 <= 'F')
			return 0;
		if (c2 >= 'A' && c2 <= 'F')
			return 0;
	}
	return 1;
}

/* Bucketed hash accessors over the computed CRCs (standard JtR pattern). */
static int get_hash_0(int index) { return crcs[index] & PH_MASK_0; }
static int get_hash_1(int index) { return crcs[index] & PH_MASK_1; }
static int get_hash_2(int index) { return crcs[index] & PH_MASK_2; }
static int get_hash_3(int index) { return crcs[index] & PH_MASK_3; }
static int get_hash_4(int index) { return crcs[index] & PH_MASK_4; }
static int get_hash_5(int index) { return crcs[index] & PH_MASK_5; }
static int get_hash_6(int index) { return crcs[index] & PH_MASK_6; }

/* Parse the target CRC (after the '.') and store its complement. */
static void *get_binary(char *ciphertext)
{
	static ARCH_WORD_32 *out;
	char *p;

	if (!out)
		out = mem_alloc_tiny(sizeof(ARCH_WORD_32), MEM_ALIGN_WORD);
	p = strchr(ciphertext, '.');
	sscanf(&p[1], "%x", out);
	// Performing the complement here, allows us to not have to complement
	// at the end of each crypt_all call.
	*out = ~(*out);
	return out;
}

/* Parse the starting CRC (after the last '$'); byte 4 selects the variant. */
static void *get_salt(char *ciphertext)
{
	static ARCH_WORD_32 *out;
	char *cp;

	if (!out)
		out = mem_alloc_tiny(sizeof(ARCH_WORD_32)*2, MEM_ALIGN_WORD);
	cp = strrchr(ciphertext, '$');
	sscanf(&cp[1], "%x", out);
	// since we ask for the crc of a file, or zero, we need to complement here,
	// to get it into 'proper' working order.
	*out = ~(*out);
	if (!strncmp(ciphertext, "$crc32$", 7))
		((char*)out)[4] = 0;
	else
		((char*)out)[4] = 1;
	return out;
}

/* Copy the candidate plaintext into its slot (NUL-terminated strcpy loop). */
static void set_key(char *key, int index)
{
	char *p = saved_key[index];
	while ( (*p++ = *key++) )
		;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Compute the CRC of every saved key, continuing from the salt CRC;
 * the variant (CRC-32 vs CRC-32C) is chosen once per batch via crctype. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int i;

	switch (crctype) {
	case 0:
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
	for (i = 0; i < count; ++i) {
		CRC32_t crc = crcsalt;
		unsigned char *p = (unsigned char*)saved_key[i];
		while (*p)
			crc = jtr_crc32(crc, *p++);
		crcs[i] = crc;
		//printf("%s() In: '%s' Out: %08x\n", __FUNCTION__, saved_key[i], ~crc);
	}
	break;
	case 1:
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
	for (i = 0; i < count; ++i) {
		CRC32_t crc = crcsalt;
		unsigned char *p = (unsigned char*)saved_key[i];
		while (*p)
			crc = jtr_crc32c(crc, *p++);
		crcs[i] = crc;
		//printf("%s() In: '%s' Out: %08x\n", __FUNCTION__, saved_key[i], ~crc);
	}
	}
	return count;
}

static void set_salt(void *salt)
{
	crcsalt = *((ARCH_WORD_32 *)salt);
	crctype = ((char*)salt)[4];
}

/* Linear scan: any candidate CRC equal to the (complemented) target? */
static int cmp_all(void *binary, int count)
{
	ARCH_WORD_32 crc=*((ARCH_WORD_32*)binary), i;
	for (i = 0; i < count; ++i)
		if (crc == crcs[i]) return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return *((ARCH_WORD_32*)binary) == crcs[index];
}

/* The 32-bit compare in cmp_one is already exact for this format. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

static int salt_hash(void *salt)
{
	return *(ARCH_WORD_32*)salt & (SALT_HASH_SIZE - 1);
}

/* Tunable-cost accessor: reports which CRC variant the salt selects. */
static unsigned int crc32_ver(void *salt)
{
	char *my_salt = (char*)salt;

	return (unsigned int)my_salt[4];
}

struct fmt_main fmt_crc32 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT,
		{
			"version: 0 = CRC-32, 1 = CRC-32C",
		},
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			crc32_ver,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
pdgeswp.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 * University of Tennessee, US,
 * University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/pzgeswp.c, normal z -> d, Fri Sep 28 17:38:11 2018
 *
 **/

#include "plasma_async.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include <plasma_core_blas.h>

#define A(m, n) (double*)plasma_tile_addr(A, m, n)

/******************************************************************************/
/* Parallel (task-based) row/column swapping driven by the pivot vector ipiv.
 *
 * For each tile-column (Rowwise) or tile-row (Columnwise) panel, three waves
 * of OpenMP tasks are submitted:
 *   1. empty "gather" tasks that make the panel anchor tile (a00) depend on
 *      every interior tile of the panel,
 *   2. one real task that performs the swaps on the whole panel via
 *      plasma_core_dgeswp,
 *   3. empty "scatter" tasks that make every interior tile depend on the
 *      anchor again.
 * The dummy `int l = 1; l++;` bodies exist only to carry the depend clauses;
 * OpenMP has no way to express a multi-tile dependency directly.
 *
 * NOTE(review): errors are reported through sequence/request elsewhere in
 * PLASMA; this routine only checks sequence->status on entry — presumably
 * plasma_core_dgeswp handles its own error signaling. TODO confirm. */
void plasma_pdgeswp(plasma_enum_t colrow, plasma_desc_t A,
                    int *ipiv, int incx,
                    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Return if failed sequence.
    if (sequence->status != PlasmaSuccess)
        return;

    if (colrow == PlasmaRowwise) {
        for (int n = 0; n < A.nt; n++) {
            double *a00, *a10;
            a00 = A(0, n);
            a10 = A(A.mt-1, n);

            // Multidependency of the whole panel on its individual tiles.
            for (int m = 1; m < A.mt-1; m++) {
                double *amn = A(m, n);
                #pragma omp task depend (in:amn[0]) \
                                 depend (inout:a00[0])
                {
                    // Dummy body: the task exists only for its depend clauses.
                    int l = 1; l++;
                }
            }
            int ma00 = (A.mt-1)*A.mb;
            int na00 = plasma_tile_nmain(A, n);
            int lda10 = plasma_tile_mmain(A, A.mt-1);
            int nva10 = plasma_tile_nview(A, n);
            // The real work: swap rows of the full panel (first tile through
            // the last, addressed via a00 and a10 in the depend clauses).
            #pragma omp task depend (in:ipiv[0:A.m]) \
                             depend (inout:a00[0:ma00*na00]) \
                             depend (inout:a10[0:lda10*nva10])
            {
                int nvan = plasma_tile_nview(A, n);
                plasma_desc_t view = plasma_desc_view(A, 0, n*A.nb, A.m, nvan);
                plasma_core_dgeswp(colrow, view, 1, A.m, ipiv, incx);
            }
            // Multidependency of individual tiles on the whole panel.
            for (int m = 1; m < A.mt-1; m++) {
                double *amn = A(m, n);
                #pragma omp task depend (in:a00[0]) \
                                 depend (inout:amn[0])
                {
                    // Dummy body: carries the reverse dependency only.
                    int l = 1; l++;
                }
            }
        }
    }
    else { // PlasmaColumnwise
        for (int m = 0; m < A.mt; m++) {
            double *a00, *a01;
            a00 = A(m, 0);
            a01 = A(m, A.nt-1);

            // Multidependency of the whole (row) panel on its individual tiles.
            for (int n = 1; n < A.nt-1; n++) {
                double *amn = A(m, n);
                #pragma omp task depend (in:amn[0]) \
                                 depend (inout:a00[0])
                {
                    // Dummy body: the task exists only for its depend clauses.
                    int l = 1; l++;
                }
            }
            // The real work: swap columns of the full row panel.
            #pragma omp task depend (in:ipiv[0:A.n]) \
                             depend (inout:a00[0]) \
                             depend (inout:a01[0])
            {
                int mvam = plasma_tile_mview(A, m);
                plasma_desc_t view = plasma_desc_view(A, m*A.mb, 0, mvam, A.n);
                plasma_core_dgeswp(colrow, view, 1, A.n, ipiv, incx);
            }
            // Multidependency of individual tiles on the whole (row) panel.
            for (int n = 1; n < A.nt-1; n++) {
                double *amn = A(m, n);
                #pragma omp task depend (in:a00[0]) \
                                 depend (inout:amn[0])
                {
                    // Dummy body: carries the reverse dependency only.
                    int l = 1; l++;
                }
            }
        }
    }
}
count_if.h
#pragma once

/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

// Count how many entries of _data (length _size) are equal to _desired_value.
//
// Fix: declared `static inline` instead of plain `inline`. In a header, a
// plain `inline` definition provides no external definition under C99/C11
// inline semantics, so any call the compiler chooses not to inline fails to
// link unless some translation unit also supplies an `extern` declaration.
// `static inline` is the conventional, always-linkable form for header-only
// helpers and is call-compatible for every includer.
static inline int get_elements_count(int *_data, int _size, int _desired_value)
{
    int count = 0;

    #pragma _NEC vector
    #pragma omp parallel for reduction(+: count)
    for (int i = 0; i < _size; i++)
    {
        // Branchless contribution keeps the loop vectorizable and leaves the
        // OpenMP reduction form unchanged.
        count += (_data[i] == _desired_value) ? 1 : 0;
    }

    return count;
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
shared-clauseModificado.c
/*
 * shared-clause.c
 *
 *  Created on: 02/04/2014
 *      Author: Carlos de la Torre
 *
 * Demo of the OpenMP shared() clause: fills a[k] = k + 1, adds each index to
 * its element inside a parallel for where the array and its size are
 * explicitly shared, then prints the result.
 */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

int main() {
    int k, n = 7;
    int a[n];

    /* Sequential initialization: a[k] = k + 1. */
    for (k = 0; k < n; k++) {
        a[k] = k + 1;
    }

    /* The loop index is predetermined private; a and n must be listed
     * explicitly because of default(none). */
#pragma omp parallel for shared(a,n) default(none)
    for (k = 0; k < n; k++) {
        a[k] += k;
    }

    printf("Después de parallel for:\n");
    for (k = 0; k < n; k++) {
        printf("a[%d] = %d\n", k, a[k]);
    }

    return 0;
}
scale_channels_layer.c
#include "scale_channels_layer.h"
#include "dark_cuda.h"
#include "blas.h"
#include <stdio.h>
#include <assert.h>

/* Build a SCALE_CHANNELS layer: multiplies the feature map of the layer at
 * `index` (w2 x h2 x c2) by a per-channel scalar coming from this layer's
 * input (which must be 1 x 1 x c, c == c2 — e.g. an SE-block excitation). */
layer make_scale_channels_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2)
{
    fprintf(stderr,"scale Layer: %d\n", index);
    layer l = { (LAYER_TYPE)0 };
    l.type = SCALE_CHANNELS;
    l.batch = batch;
    l.w = w;
    l.h = h;
    l.c = c;
    // The scaling input must be a per-channel scalar (spatial dims 1x1).
    assert(w == 1 && h == 1);

    l.out_w = w2;
    l.out_h = h2;
    l.out_c = c2;
    // One scale per output channel.
    assert(l.out_c == l.c);

    l.outputs = l.out_w*l.out_h*l.out_c;
    l.inputs = l.outputs;
    l.index = index;

    l.delta = (float*)calloc(l.outputs * batch, sizeof(float));
    l.output = (float*)calloc(l.outputs * batch, sizeof(float));

    l.forward = forward_scale_channels_layer;
    l.backward = backward_scale_channels_layer;
#ifdef GPU
    l.forward_gpu = forward_scale_channels_layer_gpu;
    l.backward_gpu = backward_scale_channels_layer_gpu;

    l.delta_gpu = cuda_make_array(l.delta, l.outputs*batch);
    l.output_gpu = cuda_make_array(l.output, l.outputs*batch);
#endif
    return l;
}

/* Resize the output spatial dimensions and regrow the buffers.
 * NOTE(review): `realloc` assigned straight back to the pointer leaks the old
 * buffer on failure; results are also unchecked — consistent with the rest of
 * this codebase, so left as-is here. */
void resize_scale_channels_layer(layer *l, int w, int h)
{
    l->out_w = w;
    l->out_h = h;
    l->outputs = l->out_w*l->out_h*l->out_c;
    l->inputs = l->outputs;
    l->delta = (float*)realloc(l->delta, l->outputs * l->batch * sizeof(float));
    l->output = (float*)realloc(l->output, l->outputs * l->batch * sizeof(float));
#ifdef GPU
    cuda_free(l->output_gpu);
    cuda_free(l->delta_gpu);
    l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch);
    l->delta_gpu = cuda_make_array(l->delta, l->outputs*l->batch);
#endif
}

/* Forward: output[i] = scale(channel of i) * featuremap[i], then activation.
 * Each i writes a distinct output slot, so the parallel for is race-free. */
void forward_scale_channels_layer(const layer l, network_state state)
{
    int size = l.batch * l.out_c * l.out_w * l.out_h;
    int channel_size = l.out_w * l.out_h;
    float *from_output = state.net.layers[l.index].output;

    int i;
    #pragma omp parallel for
    for (i = 0; i < size; ++i) {
        // state.input holds one scalar per (batch, channel); i / channel_size
        // maps the flat output index to that scalar.
        l.output[i] = state.input[i / channel_size] * from_output[i];
    }
    activate_array(l.output, l.outputs*l.batch, l.activation);
}

/* Backward: propagate gradients to both the scale input (state.delta) and the
 * scaled feature map (from_delta).
 *
 * Fix: all `channel_size` iterations of a channel accumulate into the same
 * state.delta[i / channel_size] slot, so the original parallel for raced on
 * that read-modify-write. The accumulation is now an atomic update; the
 * from_delta[i] update touches a distinct slot per i and needs no protection. */
void backward_scale_channels_layer(const layer l, network_state state)
{
    gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
    //axpy_cpu(l.outputs*l.batch, 1, l.delta, 1, state.delta, 1);
    //scale_cpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta, l.w, l.h, l.c, state.net.layers[l.index].delta);

    int size = l.batch * l.out_c * l.out_w * l.out_h;
    int channel_size = l.out_w * l.out_h;
    float *from_output = state.net.layers[l.index].output;
    float *from_delta = state.net.layers[l.index].delta;

    int i;
    #pragma omp parallel for
    for (i = 0; i < size; ++i) {
        #pragma omp atomic
        state.delta[i / channel_size] += l.delta[i] * from_output[i] / channel_size; // l.delta * from (should be divided by channel_size?)

        from_delta[i] += state.input[i / channel_size] * l.delta[i]; // input * l.delta
    }
}

#ifdef GPU
/* GPU forward: same math as the CPU path, via the scale_channels kernel. */
void forward_scale_channels_layer_gpu(const layer l, network_state state)
{
    int size = l.batch * l.out_c * l.out_w * l.out_h;
    int channel_size = l.out_w * l.out_h;
    scale_channels_gpu(state.net.layers[l.index].output_gpu, size, channel_size, state.input, l.output_gpu);
    activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
}

/* GPU backward: gradient accumulation handled inside the CUDA kernel. */
void backward_scale_channels_layer_gpu(const layer l, network_state state)
{
    gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);

    int size = l.batch * l.out_c * l.out_w * l.out_h;
    int channel_size = l.out_w * l.out_h;
    float *from_output = state.net.layers[l.index].output_gpu;
    float *from_delta = state.net.layers[l.index].delta_gpu;

    backward_scale_channels_gpu(l.delta_gpu, size, channel_size, state.input, from_delta, from_output, state.delta);
}
#endif
5310.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "atax.h" /* Array initialization. */ static void init_array (int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny)) { int i, j; for (i = 0; i < ny; i++) x[i] = i * M_PI; for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) A[i][j] = ((DATA_TYPE) i*(j+1)) / nx; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int nx, DATA_TYPE POLYBENCH_1D(y,NX,nx)) { int i; for (i = 0; i < nx; i++) { fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]); if (i % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_atax(int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny), DATA_TYPE POLYBENCH_1D(y,NY,ny), DATA_TYPE POLYBENCH_1D(tmp,NX,nx)) { int i, j; #pragma scop { #pragma omp target teams distribute schedule(dynamic, 8) for (i = 0; i < _PB_NY; i++) { y[i] = 0; } #pragma omp target teams distribute schedule(dynamic, 8) for (i = 0; i < _PB_NX; i++) { tmp[i] = 0; for (j = 0; j < _PB_NY; j++) tmp[i] = tmp[i] + A[i][j] * x[j]; for (j = 0; j < _PB_NY; j++) y[j] = y[j] + A[i][j] * tmp[i]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int nx = NX; int ny = NY; /* Variable declaration/allocation. 
*/ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny); POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx); /* Initialize array(s). */ init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_atax (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(tmp)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(x); POLYBENCH_FREE_ARRAY(y); POLYBENCH_FREE_ARRAY(tmp); return 0; }
trmv_x_csc_n_lo.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Parallel triangular matrix-vector multiply for a CSC matrix:
 * computes y := alpha * tril(A) * x + beta * y, using the lower triangle
 * (row >= column) of a square CSC matrix A, non-unit diagonal.
 *
 * Strategy: columns are partitioned across threads by nonzero count; each
 * thread accumulates its partial alpha * L * x into a private length-m
 * buffer (tmp[tid]) to avoid write conflicts on y, then the per-thread
 * buffers are reduced and combined with beta * y in a second parallel pass.
 *
 * ALPHA_Number is a build-time generic scalar type; alpha_setzero /
 * alpha_mul / alpha_madde / alpha_add are its type-generic helper macros.
 *
 * NOTE(review): the 4-way unrolled ladder below assumes row indices within
 * each column are sorted ascending (so row_0 <= row_1 <= row_2 <= row_3) —
 * confirm against the CSC construction path. malloc results are unchecked.
 */
static alphasparse_status_t trmv_csc_n_lo_omp(const ALPHA_Number alpha,
                                              const ALPHA_SPMAT_CSC *A,
                                              const ALPHA_Number *x,
                                              const ALPHA_Number beta,
                                              ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    /* Triangular multiply is only defined for square matrices. */
    if(m != n)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;

    const ALPHA_INT thread_num = alpha_get_thread_num();
    /* VLA: one partition boundary per thread, plus the closing bound. */
    ALPHA_INT partition[thread_num + 1];
    /* Balance columns across threads by nonzero count, not column count. */
    balanced_partition_row_by_nnz(A->cols_end, n, thread_num, partition);

    /* Per-thread accumulation buffers; allocated inside the parallel
       region so each thread first-touches its own buffer. */
    ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num);

#ifdef _OPENMP
#pragma omp parallel num_threads(thread_num)
#endif
    {
        const ALPHA_INT tid = alpha_get_thread_id();
        const ALPHA_INT local_n_s = partition[tid];
        const ALPHA_INT local_n_e = partition[tid + 1];
        tmp[tid] = (ALPHA_Number*)malloc(sizeof(ALPHA_Number) * m);
        for(ALPHA_INT j = 0; j < m; ++j)
        {
            alpha_setzero(tmp[tid][j]);
        }
        /* Scatter alpha * A[:,i] * x[i] into the private buffer for every
           entry on or below the diagonal (row >= i). */
        for(ALPHA_INT i = local_n_s; i < local_n_e; ++i)
        {
            const ALPHA_Number x_r = x[i];
            register ALPHA_Number tmp_t;
            alpha_setzero(tmp_t);
            ALPHA_INT cs = A->cols_start[i];
            ALPHA_INT ce = A->cols_end[i];
            /* 4-way unrolled body; the if/else ladder skips leading
               above-diagonal entries, relying on ascending row order. */
            for(; cs < ce-3; cs += 4)
            {
                const ALPHA_INT row_0 = A->row_indx[cs];
                const ALPHA_INT row_1 = A->row_indx[cs+1];
                const ALPHA_INT row_2 = A->row_indx[cs+2];
                const ALPHA_INT row_3 = A->row_indx[cs+3];
                if(row_0 >= i)
                {
                    /* All four entries are in the lower triangle. */
                    alpha_mul(tmp_t, A->values[cs], x_r);
                    alpha_madde(tmp[tid][row_0], alpha, tmp_t);
                    alpha_mul(tmp_t, A->values[cs+1], x_r);
                    alpha_madde(tmp[tid][row_1], alpha, tmp_t);
                    alpha_mul(tmp_t, A->values[cs+2], x_r);
                    alpha_madde(tmp[tid][row_2], alpha, tmp_t);
                    alpha_mul(tmp_t, A->values[cs+3], x_r);
                    alpha_madde(tmp[tid][row_3], alpha, tmp_t);
                }else if (row_1 >= i){
                    /* First entry is strictly above the diagonal; keep 3. */
                    alpha_mul(tmp_t, A->values[cs+1], x_r);
                    alpha_madde(tmp[tid][row_1], alpha, tmp_t);
                    alpha_mul(tmp_t, A->values[cs+2], x_r);
                    alpha_madde(tmp[tid][row_2], alpha, tmp_t);
                    alpha_mul(tmp_t, A->values[cs+3], x_r);
                    alpha_madde(tmp[tid][row_3], alpha, tmp_t);
                }else if (row_2 >= i){
                    /* Keep the last two entries. */
                    alpha_mul(tmp_t, A->values[cs+2], x_r);
                    alpha_madde(tmp[tid][row_2], alpha, tmp_t);
                    alpha_mul(tmp_t, A->values[cs+3], x_r);
                    alpha_madde(tmp[tid][row_3], alpha, tmp_t);
                }else if (row_3 >= i){
                    /* Keep only the last entry. */
                    alpha_mul(tmp_t, A->values[cs+3], x_r);
                    alpha_madde(tmp[tid][row_3], alpha, tmp_t);
                }
            }
            /* Remainder loop for the final (ce - cs) < 4 entries. */
            for (;cs < ce;++cs)
            {
                const ALPHA_INT row = A->row_indx[cs];
                if (row >= i){
                    alpha_mul(tmp_t, A->values[cs], x_r);
                    alpha_madde(tmp[tid][row], alpha, tmp_t);
                }
            }
        }
    }

    /* Reduction: y[i] = beta * y[i] + sum over threads of tmp[t][i]. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        ALPHA_Number tmp_y;
        alpha_setzero(tmp_y);
        for(ALPHA_INT j = 0; j < thread_num; ++j)
        {
            alpha_add(tmp_y, tmp_y, tmp[j][i]);
            // tmp_y += tmp[j][i];
        }
        alpha_madde(tmp_y, y[i], beta);
        y[i] = tmp_y;
        // y[i] = y[i] * beta + tmp_y;
    }

    /* Release the per-thread buffers (one free per iteration). */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for(ALPHA_INT i = 0; i < thread_num; ++i)
    {
        free(tmp[i]);
    }
    free(tmp);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/* Public entry point (name generated by the ONAME build macro);
 * delegates directly to the OpenMP implementation above. */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_CSC *A,
                           const ALPHA_Number *x,
                           const ALPHA_Number beta,
                           ALPHA_Number *y)
{
    return trmv_csc_n_lo_omp(alpha, A, x, beta, y);
}
convolution_3x3_int8.h
// BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Direct (naive) 3x3 stride-1 int8 convolution: for each output channel,
// accumulate int32 sums of 9 signed-char products over all input channels.
// Parallelized over output channels; assumes top_blob holds int32 data.
static void conv3x3s1_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char *kernel = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);
        out0.fill(0);

        // 9 weights per (output channel, input channel) pair.
        const signed char *kernel0 = (const signed char *)kernel + p * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            int *outptr0 = out0;

            const signed char *img0 = bottom_blob.channel(q);

            // Three consecutive input rows feeding one output row.
            const signed char *r0 = img0;
            const signed char *r1 = img0 + w;
            const signed char *r2 = img0 + w * 2;

            for (int i = 0; i < outh; i++)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    int sum0 = 0;

                    sum0 += (int)r0[0] * kernel0[0];
                    sum0 += (int)r0[1] * kernel0[1];
                    sum0 += (int)r0[2] * kernel0[2];
                    sum0 += (int)r1[0] * kernel0[3];
                    sum0 += (int)r1[1] * kernel0[4];
                    sum0 += (int)r1[2] * kernel0[5];
                    sum0 += (int)r2[0] * kernel0[6];
                    sum0 += (int)r2[1] * kernel0[7];
                    sum0 += (int)r2[2] * kernel0[8];

                    *outptr0 += sum0;

                    r0++;
                    r1++;
                    r2++;
                    outptr0++;
                }

                // Skip the 2-pixel right border of the 3x3 window.
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }

            kernel0 += 9;
        }
    }
}

// Winograd F(2,3) kernel transform for int8: U = G * g * G^T per
// (outch, inch) 3x3 kernel, stored as 4x4 shorts. ktm is the float G
// matrix scaled by 2 to stay integral; the output transform divides the
// 2*2=4 factor back out with ">> 2".
static void conv3x3s1_winograd23_transform_kernel_int8_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(4*4, inch, outch, 2ul);

    // G (scaled by 2)
    const short ktm[4][3] = {
        {   2,     0,     0},
        {   1,     1,     1},
        {   1,    -1,     1},
        {   0,     0,     2}
    };

    #pragma omp parallel for
    for (int p = 0; p<outch; p++)
    {
        for (int q = 0; q<inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p*inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // transform kernel
            const signed char* k0 = kernel0;
            const signed char* k1 = kernel0 + 3;
            const signed char* k2 = kernel0 + 6;

            // h = G * g
            short tmp[4][3];
            for (int i=0; i<4; i++)
            {
                tmp[i][0] = (short)k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = (short)k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = (short)k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U = h * G^T
            for (int j=0; j<4; j++)
            {
                short* tmpp = &tmp[j][0];

                for (int i=0; i<4; i++)
                {
                    kernel_tm0[j*4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }
}

// Winograd F(2,3) int8 convolution: pad input to a multiple of 2 (+2 border),
// transform 4x4 input tiles (B^T d B), multiply element-wise against the
// transformed kernels, then inverse-transform (A^T Y A) and ">> 2" to undo
// the scaled-G factor. Expects kernel_tm from the matching transform above.
static void conv3x3s1_winograd23_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 2n+2, winograd F(2,3)
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 1) / 2 * 2;
    outh = (outh + 1) / 2 * 2;
    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm/4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm/4;

        const int tiles = nColBlocks * nRowBlocks;

        bottom_blob_tm.create(4*4, tiles, inch, 2u, opt.workspace_allocator);

        // BT
        // const float itm[4][4] = {
        //     {1.0f,  0.0f, -1.0f,  0.0f},
        //     {0.0f,  1.0f,  1.00f, 0.0f},
        //     {0.0f, -1.0f,  1.00f, 0.0f},
        //     {0.0f, -1.0f,  0.00f, 1.0f}
        // };

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q=0; q<inch; q++)
        {
            const signed char* img = bottom_blob_bordered.channel(q);
            short* out_tm0 = bottom_blob_tm.channel(q);

            for (int j = 0; j < nColBlocks; j++)
            {
                // Tiles overlap by 2 rows/cols (stride 2, window 4).
                const signed char* r0 = img + w * j * 2;
                const signed char* r1 = r0 + w;
                const signed char* r2 = r1 + w;
                const signed char* r3 = r2 + w;

                for (int i = 0; i < nRowBlocks; i++)
                {
                    short d0[4],d1[4],d2[4],d3[4];
                    short w0[4],w1[4],w2[4],w3[4];
                    short t0[4],t1[4],t2[4],t3[4];

                    // load
                    for (int n = 0; n < 4; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                    }
                    // w = B_t * d
                    for (int n = 0; n < 4; n++)
                    {
                        w0[n] = d0[n] - d2[n];
                        w1[n] = d1[n] + d2[n];
                        w2[n] = d2[n] - d1[n];
                        w3[n] = d3[n] - d1[n];
                    }
                    // transpose d to d_t
                    {
                        t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3];
                        t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3];
                        t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3];
                        t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3];
                    }
                    // U = B_t * d_t
                    for (int n = 0; n < 4; n++)
                    {
                        d0[n] = t0[n] - t2[n];
                        d1[n] = t1[n] + t2[n];
                        d2[n] = t2[n] - t1[n];
                        d3[n] = t3[n] - t1[n];
                    }
                    // save to out_tm
                    for (int n = 0; n < 4; n++)
                    {
                        out_tm0[n   ] = d0[n];
                        out_tm0[n+ 4] = d1[n];
                        out_tm0[n+ 8] = d2[n];
                        out_tm0[n+12] = d3[n];
                    }

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;

                    out_tm0 += 16;
                }
            }
        }
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot: element-wise multiply-accumulate over input channels in
    // the transformed domain. Output channels are unrolled by 4; the
    // k0 += 16 / k0 -= 16*3 walk steps across the 4 consecutive input-
    // channel rows of the transformed kernel.
    Mat top_blob_tm;
    {
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm/4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm/4;

        const int tiles = nColBlocks * nRowBlocks;

        top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);

        int nn_outch = outch >> 2;
        int remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int p = pp * 4;

            Mat out0_tm = top_blob_tm.channel(p);
            Mat out1_tm = top_blob_tm.channel(p+1);
            Mat out2_tm = top_blob_tm.channel(p+2);
            Mat out3_tm = top_blob_tm.channel(p+3);

            const Mat kernel0_tm = kernel_tm.channel(p);
            const Mat kernel1_tm = kernel_tm.channel(p+1);
            const Mat kernel2_tm = kernel_tm.channel(p+2);
            const Mat kernel3_tm = kernel_tm.channel(p+3);

            for (int i=0; i<tiles; i++)
            {
                int* output0_tm = out0_tm.row<int>(i);
                int* output1_tm = out1_tm.row<int>(i);
                int* output2_tm = out2_tm.row<int>(i);
                int* output3_tm = out3_tm.row<int>(i);

                int sum0[16] = {0};
                int sum1[16] = {0};
                int sum2[16] = {0};
                int sum3[16] = {0};

                int q = 0;
                // Input channels unrolled by 4.
                for (; q+3<inch; q+=4)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
                    const short* r1 = bottom_blob_tm.channel(q+1).row<short>(i);
                    const short* r2 = bottom_blob_tm.channel(q+2).row<short>(i);
                    const short* r3 = bottom_blob_tm.channel(q+3).row<short>(i);

                    const short* k0 = kernel0_tm.row<short>(q);
                    const short* k1 = kernel1_tm.row<short>(q);
                    const short* k2 = kernel2_tm.row<short>(q);
                    const short* k3 = kernel3_tm.row<short>(q);

                    for (int n=0; n<16; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                        k0 += 16;
                        sum0[n] += (int)r1[n] * k0[n];
                        k0 += 16;
                        sum0[n] += (int)r2[n] * k0[n];
                        k0 += 16;
                        sum0[n] += (int)r3[n] * k0[n];
                        k0 -= 16 * 3;

                        sum1[n] += (int)r0[n] * k1[n];
                        k1 += 16;
                        sum1[n] += (int)r1[n] * k1[n];
                        k1 += 16;
                        sum1[n] += (int)r2[n] * k1[n];
                        k1 += 16;
                        sum1[n] += (int)r3[n] * k1[n];
                        k1 -= 16 * 3;

                        sum2[n] += (int)r0[n] * k2[n];
                        k2 += 16;
                        sum2[n] += (int)r1[n] * k2[n];
                        k2 += 16;
                        sum2[n] += (int)r2[n] * k2[n];
                        k2 += 16;
                        sum2[n] += (int)r3[n] * k2[n];
                        k2 -= 16 * 3;

                        sum3[n] += (int)r0[n] * k3[n];
                        k3 += 16;
                        sum3[n] += (int)r1[n] * k3[n];
                        k3 += 16;
                        sum3[n] += (int)r2[n] * k3[n];
                        k3 += 16;
                        sum3[n] += (int)r3[n] * k3[n];
                        k3 -= 16 * 3;
                    }
                }

                for (; q<inch; q++)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);

                    const short* k0 = kernel0_tm.row<short>(q);
                    const short* k1 = kernel1_tm.row<short>(q);
                    const short* k2 = kernel2_tm.row<short>(q);
                    const short* k3 = kernel3_tm.row<short>(q);

                    for (int n=0; n<16; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                        sum1[n] += (int)r0[n] * k1[n];
                        sum2[n] += (int)r0[n] * k2[n];
                        sum3[n] += (int)r0[n] * k3[n];
                    }
                }

                for (int n=0; n<16; n++)
                {
                    output0_tm[n] = sum0[n];
                    output1_tm[n] = sum1[n];
                    output2_tm[n] = sum2[n];
                    output3_tm[n] = sum3[n];
                }
            }
        }

        // Remaining output channels, one at a time. Here the kernel rows
        // for the 4 unrolled input channels are fetched directly instead
        // of walking one pointer.
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=remain_outch_start; p<outch; p++)
        {
            Mat out0_tm = top_blob_tm.channel(p);

            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int i=0; i<tiles; i++)
            {
                int* output0_tm = out0_tm.row<int>(i);

                int sum0[16] = {0};

                int q = 0;
                for (; q+3<inch; q+=4)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
                    const short* r1 = bottom_blob_tm.channel(q+1).row<short>(i);
                    const short* r2 = bottom_blob_tm.channel(q+2).row<short>(i);
                    const short* r3 = bottom_blob_tm.channel(q+3).row<short>(i);

                    const short* k0 = kernel0_tm.row<short>(q);
                    const short* k1 = kernel0_tm.row<short>(q+1);
                    const short* k2 = kernel0_tm.row<short>(q+2);
                    const short* k3 = kernel0_tm.row<short>(q+3);

                    for (int n=0; n<16; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                        sum0[n] += (int)r1[n] * k1[n];
                        sum0[n] += (int)r2[n] * k2[n];
                        sum0[n] += (int)r3[n] * k3[n];
                    }
                }

                for (; q<inch; q++)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
                    const short* k0 = kernel0_tm.row<short>(q);

                    for (int n=0; n<16; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                    }
                }

                for (int n=0; n<16; n++)
                {
                    output0_tm[n] = sum0[n];
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
    {
        // AT
        // const float itm[2][4] = {
        //     {1.0f,  1.0f,  1.0f,  0.0f},
        //     {0.0f,  1.0f, -1.0f,  1.0f}
        // };

        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm/4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm/4;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=0; p<outch; p++)
        {
            Mat out_tm = top_blob_tm.channel(p);
            Mat out = top_blob_bordered.channel(p);

            for (int j=0; j<nColBlocks; j++)
            {
                int* outRow0 = out.row<int>(j*2);
                int* outRow1 = out.row<int>(j*2+1);

                for(int i=0; i<nRowBlocks; i++)
                {
                    int* out_tile = out_tm.row<int>(j*nRowBlocks + i);

                    int s0[4],s1[4],s2[4],s3[4];
                    int w0[4],w1[4];
                    int d0[2],d1[2],d2[2],d3[2];
                    int o0[2],o1[2];

                    // load
                    for (int n = 0; n < 4; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n+ 4];
                        s2[n] = out_tile[n+ 8];
                        s3[n] = out_tile[n+12];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 4; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n];
                        w1[n] = s1[n] - s2[n] + s3[n];
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0]; d0[1] = w1[0];
                        d1[0] = w0[1]; d1[1] = w1[1];
                        d2[0] = w0[2]; d2[1] = w1[2];
                        d3[0] = w0[3]; d3[1] = w1[3];
                    }
                    // Y = A_T * w_t
                    for (int n = 0; n < 2; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n];
                        o1[n] = d1[n] - d2[n] + d3[n];
                    }
                    // save to top blob tm,why right 2,because the G' = G*2
                    outRow0[0] = o0[0] >> 2;
                    outRow0[1] = o0[1] >> 2;
                    outRow1[0] = o1[0] >> 2;
                    outRow1[1] = o1[1] >> 2;

                    outRow0 += 2;
                    outRow1 += 2;
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads);
}

// Winograd F(4,3) kernel transform for int8: U = G * g * G^T per
// (outch, inch) kernel, stored as 6x6 shorts. ktm is the float G matrix
// scaled by 24 to stay integral; the output transform divides the
// 24*24=576 factor back out.
static void conv3x3s1_winograd43_transform_kernel_int8_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(6*6, inch, outch, 2ul);

    // G
    // const float ktm[6][3] = {
    //     {  1.0f/4,     0.0f,    0.0f},
    //     { -1.0f/6,  -1.0f/6, -1.0f/6},
    //     { -1.0f/6,   1.0f/6, -1.0f/6},
    //     { 1.0f/24,  1.0f/12,  1.0f/6},
    //     { 1.0f/24, -1.0f/12,  1.0f/6},
    //     {    0.0f,     0.0f,    1.0f}
    // };
    const short ktm[6][3] = {
        {   6,    0,    0},
        {  -4,   -4,   -4},
        {  -4,    4,   -4},
        {   1,    2,    4},
        {   1,   -2,    4},
        {   0,    0,   24}
    };

    #pragma omp parallel for
    for (int p = 0; p<outch; p++)
    {
        for (int q = 0; q<inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p*inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // transform kernel
            const signed char* k0 = kernel0;
            const signed char* k1 = kernel0 + 3;
            const signed char* k2 = kernel0 + 6;

            // h = G * g
            short tmp[6][3];
            for (int i=0; i<6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U = h * G^T
            for (int j=0; j<6; j++)
            {
                short* tmpp = &tmp[j][0];

                for (int i=0; i<6; i++)
                {
                    kernel_tm0[j*6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }
}

// Winograd F(4,3) int8 convolution: pad input to a multiple of 4 (+2
// border), transform 6x6 input tiles, multiply against the transformed
// kernels, then inverse-transform and divide by 576 (the scaled-G factor).
static void conv3x3s1_winograd43_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2, winograd F(4,3)
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;
    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        int nColBlocks = h_tm/6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm/6;

        const int tiles = nColBlocks * nRowBlocks;

        bottom_blob_tm.create(6*6, tiles, inch, 2u, opt.workspace_allocator);

        // BT
        // const float itm[4][4] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f,  0.0f,-5.0f, 0.0f, 1.0f}
        // };

        // 0 =  4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r03 + r04
        // 2 =  4 * (r01 - r02) - r03 + r04
        // 3 = -2 * r01 - r02 + 2 * r03 + r04
        // 4 =  2 * r01 - r02 - 2 * r03 + r04
        // 5 =  4 * r01 - 5 * r03 + r05

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q=0; q<inch; q++)
        {
            const signed char* img = bottom_blob_bordered.channel(q);
            short* out_tm0 = bottom_blob_tm.channel(q);

            for (int j = 0; j < nColBlocks; j++)
            {
                // Tiles overlap by 2 rows/cols (stride 4, window 6).
                const signed char* r0 = img + w * j * 4;
                const signed char* r1 = r0 + w;
                const signed char* r2 = r1 + w;
                const signed char* r3 = r2 + w;
                const signed char* r4 = r3 + w;
                const signed char* r5 = r4 + w;

                for (int i = 0; i < nRowBlocks; i++)
                {
                    short d0[6],d1[6],d2[6],d3[6],d4[6],d5[6];
                    short w0[6],w1[6],w2[6],w3[6],w4[6],w5[6];
                    short t0[6],t1[6],t2[6],t3[6],t4[6],t5[6];

                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                        d4[n] = r4[n];
                        d5[n] = r5[n];
                    }
                    // w = B_t * d
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] =  4*d0[n] - 5*d2[n] + d4[n];
                        w1[n] = -4*d1[n] - 4*d2[n] + d3[n] + d4[n];
                        w2[n] =  4*d1[n] - 4*d2[n] - d3[n] + d4[n];
                        w3[n] = -2*d1[n] -   d2[n] + 2*d3[n] + d4[n];
                        w4[n] =  2*d1[n] -   d2[n] - 2*d3[n] + d4[n];
                        w5[n] =  4*d1[n] - 5*d3[n] + d5[n];
                    }
                    // transpose d to d_t
                    {
                        t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3]; t4[0]=w0[4]; t5[0]=w0[5];
                        t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t4[1]=w1[4]; t5[1]=w1[5];
                        t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t4[2]=w2[4]; t5[2]=w2[5];
                        t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; t4[3]=w3[4]; t5[3]=w3[5];
                        t0[4]=w4[0]; t1[4]=w4[1]; t2[4]=w4[2]; t3[4]=w4[3]; t4[4]=w4[4]; t5[4]=w4[5];
                        t0[5]=w5[0]; t1[5]=w5[1]; t2[5]=w5[2]; t3[5]=w5[3]; t4[5]=w5[4]; t5[5]=w5[5];
                    }
                    // d = B_t * d_t
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] =  4*t0[n] - 5*t2[n] + t4[n];
                        d1[n] = - 4*t1[n] - 4*t2[n] + t3[n] + t4[n];
                        d2[n] =  4*t1[n] - 4*t2[n] - t3[n] + t4[n];
                        d3[n] = - 2*t1[n] - t2[n] + 2*t3[n] + t4[n];
                        d4[n] =  2*t1[n] - t2[n] - 2*t3[n] + t4[n];
                        d5[n] =  4*t1[n] - 5*t3[n] + t5[n];
                    }
                    // save to out_tm
                    for (int n = 0; n < 6; n++)
                    {
                        out_tm0[n   ] = d0[n];
                        out_tm0[n+ 6] = d1[n];
                        out_tm0[n+12] = d2[n];
                        out_tm0[n+18] = d3[n];
                        out_tm0[n+24] = d4[n];
                        out_tm0[n+30] = d5[n];
                    }

                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    r5 += 4;

                    out_tm0 += 36;
                }
            }
        }
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot: per-output-channel element-wise MAC over input channels.
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        int nColBlocks = h_tm/6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm/6;

        const int tiles = nColBlocks * nRowBlocks;

        top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=0; p<outch; p++)
        {
            Mat out0_tm = top_blob_tm.channel(p);

            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int i=0; i<tiles; i++)
            {
                int* output0_tm = out0_tm.row<int>(i);

                int sum0[36] = {0};

                for (int q=0; q<inch; q++)
                {
                    const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
                    const short* k0 = kernel0_tm.row<short>(q);

                    for (int n=0; n<36; n++)
                    {
                        sum0[n] += (int)r0[n] * k0[n];
                    }
                }

                for (int n=0; n<36; n++)
                {
                    output0_tm[n] = sum0[n];
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
    {
        // AT
        // const float itm[4][6] = {
        //     {1.0f, 1.0f,  1.0f, 1.0f,  1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
        //     {0.0f, 1.0f,  1.0f, 4.0f,  4.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
        // };

        // 0 = r00 + r01 + r02 + r03 + r04
        // 1 = r01 - r02 + 2 * (r03 - r04)
        // 2 = r01 + r02 + 4 * (r03 + r04)
        // 3 = r01 - r02 + 8 * (r03 - r04) + r05

        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        int nColBlocks = h_tm/6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm/6;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=0; p<outch; p++)
        {
            Mat out_tm = top_blob_tm.channel(p);
            Mat out = top_blob_bordered.channel(p);

            for (int j=0; j<nColBlocks; j++)
            {
                int* outRow0 = out.row<int>(j*4);
                int* outRow1 = out.row<int>(j*4+1);
                int* outRow2 = out.row<int>(j*4+2);
                int* outRow3 = out.row<int>(j*4+3);

                for(int i=0; i<nRowBlocks; i++)
                {
                    int* out_tile = out_tm.row<int>(j*nRowBlocks + i);

                    int s0[6],s1[6],s2[6],s3[6],s4[6],s5[6];
                    int w0[6],w1[6],w2[6],w3[6];
                    int d0[4],d1[4],d2[4],d3[4],d4[4],d5[4];
                    int o0[4],o1[4],o2[4],o3[4];

                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n+ 6];
                        s2[n] = out_tile[n+12];
                        s3[n] = out_tile[n+18];
                        s4[n] = out_tile[n+24];
                        s5[n] = out_tile[n+30];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
                        w1[n] = s1[n] - s2[n] + 2*s3[n] - 2*s4[n];
                        w2[n] = s1[n] + s2[n] + 4*s3[n] + 4*s4[n];
                        w3[n] = s1[n] - s2[n] + 8*s3[n] - 8*s4[n] + s5[n];
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0];
                        d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1];
                        d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2];
                        d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3];
                        d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4];
                        d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5];
                    }
                    // Y = A_T * w_t
                    for (int n = 0; n < 4; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
                        o1[n] = d1[n] - d2[n] + 2*d3[n] - 2*d4[n];
                        o2[n] = d1[n] + d2[n] + 4*d3[n] + 4*d4[n];
                        o3[n] = d1[n] - d2[n] + 8*d3[n] - 8*d4[n] + d5[n];
                    }
                    // save to top blob tm; divide by 576 because G was scaled by 24
                    for (int n = 0; n < 4; n++)
                    {
                        outRow0[n] = o0[n] / 576;
                        outRow1[n] = o1[n] / 576;
                        outRow2[n] = o2[n] / 576;
                        outRow3[n] = o3[n] / 576;
                    }

                    outRow0 += 4;
                    outRow1 += 4;
                    outRow2 += 4;
                    outRow3 += 4;
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads);
}

// Direct 3x3 stride-2 int8 convolution; same accumulation scheme as the
// stride-1 variant, with input pointers advancing by 2 per output pixel
// and `tailstep` skipping the row remainder plus one full skipped row.
static void conv3x3s2_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int tailstep = w - 2 * outw + w;
    const signed char *kernel = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);
        out0.fill(0);

        const signed char *kernel0 = (const signed char *)kernel + p * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            int *outptr0 = out0;

            const signed char *img0 = bottom_blob.channel(q);

            const signed char *r0 = img0;
            const signed char *r1 = img0 + w;
            const signed char *r2 = img0 + w * 2;

            for (int i = 0; i < outh; i++)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    int sum0 = 0;

                    sum0 += (int)r0[0] * kernel0[0];
                    sum0 += (int)r0[1] * kernel0[1];
                    sum0 += (int)r0[2] * kernel0[2];
                    sum0 += (int)r1[0] * kernel0[3];
                    sum0 += (int)r1[1] * kernel0[4];
                    sum0 += (int)r1[2] * kernel0[5];
                    sum0 += (int)r2[0] * kernel0[6];
                    sum0 += (int)r2[1] * kernel0[7];
                    sum0 += (int)r2[2] * kernel0[8];

                    *outptr0 += sum0;

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr0++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }

            kernel0 += 9;
        }
    }
}

// Thin wrapper: 3x3 stride-1 int8 conv with dequantization, implemented
// via the shared im2col+sgemm path.
static void conv3x3s1_int8_dequant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_dequant, const Option& opt)
{
    int kernel_w = 3;
    int kernel_h = 3;

    int stride_w = 1;
    int stride_h = 1;

    conv_im2col_sgemm_int8_dequant_sse(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, _bias, scales_dequant, opt);
}

// Thin wrapper: 3x3 stride-2 int8 conv with dequantization.
static void conv3x3s2_int8_dequant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_dequant, const Option& opt)
{
    int kernel_w = 3;
    int kernel_h = 3;

    int stride_w = 2;
    int stride_h = 2;

    conv_im2col_sgemm_int8_dequant_sse(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, _bias, scales_dequant, opt);
}

// Thin wrapper: 3x3 stride-1 int8 conv with requantization.
static void conv3x3s1_int8_requant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_requant, const Option& opt)
{
    int kernel_w = 3;
    int kernel_h = 3;

    int stride_w = 1;
    int stride_h = 1;

    conv_im2col_sgemm_int8_requant_sse(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, _bias, scales_requant, opt);
}

// Thin wrapper: 3x3 stride-2 int8 conv with requantization.
static void conv3x3s2_int8_requant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_requant, const Option& opt)
{
    int kernel_w = 3;
    int kernel_h = 3;

    int stride_w = 2;
    int stride_h = 2;

    conv_im2col_sgemm_int8_requant_sse(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, _bias, scales_requant, opt);
}
H2Pack_matvec.c
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <assert.h> #include <math.h> #include <omp.h> #include "H2Pack_config.h" #include "H2Pack_typedef.h" #include "H2Pack_aux_structs.h" #include "H2Pack_matvec.h" #include "H2Pack_utils.h" #include "utils.h" // Calculate GEMV A * x0 and A^T * x1 in one run to reduce bandwidth pressure // Input parameters: // nrow : Number of rows in the matrix // ncol : Number of columns in the matrix // mat : Matrix, size >= nrow * ldm // ldm : Leading dimension of the matrix, >= ncol // x_in_0 : Input vector 0 // x_in_1 : Input vector 1 // Output parameter: // x_out_0 : Output vector 0, := mat * x_in_0 // x_out_1 : Output vector 1, := mat^T * x_in_1 void CBLAS_BI_GEMV( const int nrow, const int ncol, const DTYPE *mat, const int ldm, const DTYPE *x_in_0, const DTYPE *x_in_1, DTYPE *x_out_0, DTYPE *x_out_1 ) { const int nrow_2 = (nrow / 2) * 2; for (int i = 0; i < nrow_2; i += 2) { const DTYPE *mat_irow0 = mat + (i + 0) * ldm; const DTYPE *mat_irow1 = mat + (i + 1) * ldm; const DTYPE x_in_1_i0 = x_in_1[i + 0]; const DTYPE x_in_1_i1 = x_in_1[i + 1]; DTYPE sum0 = 0, sum1 = 0; #pragma omp simd for (int j = 0; j < ncol; j++) { DTYPE x_in_0_j = x_in_0[j]; sum0 += mat_irow0[j] * x_in_0_j; sum1 += mat_irow1[j] * x_in_0_j; DTYPE tmp = x_in_1_i0 * mat_irow0[j]; tmp += x_in_1_i1 * mat_irow1[j]; x_out_1[j] += tmp; } x_out_0[i + 0] += sum0; x_out_0[i + 1] += sum1; } for (int i = nrow_2; i < nrow; i++) { const DTYPE *mat_irow = mat + i * ldm; const DTYPE x_in_1_i = x_in_1[i]; DTYPE sum = 0; #pragma omp simd for (int j = 0; j < ncol; j++) { sum += mat_irow[j] * x_in_0[j]; x_out_1[j] += x_in_1_i * mat_irow[j]; } x_out_0[i] += sum; } } // Initialize auxiliary array y0 used in H2 matvec forward transformation void H2P_matvec_init_y0(H2Pack_p h2pack) { if (h2pack->y0 != NULL) return; int n_node = h2pack->n_node; h2pack->y0 = (H2P_dense_mat_p*) malloc(sizeof(H2P_dense_mat_p) * n_node); ASSERT_PRINTF( h2pack->y0 != NULL, "Failed to 
allocate %d H2P_dense_mat_t for H2 matvec buffer\n", n_node ); H2P_dense_mat_p *y0 = h2pack->y0; H2P_dense_mat_p *U = h2pack->U; for (int node = 0; node < n_node; node++) { int ncol = U[node]->ncol; if (ncol > 0) { H2P_dense_mat_init(&y0[node], ncol, 1); } else { H2P_dense_mat_init(&y0[node], 0, 0); y0[node]->nrow = 0; y0[node]->ncol = 0; y0[node]->ld = 0; } } } // H2 matvec forward transformation, calculate U_j^T * x_j void H2P_matvec_fwd_transform(H2Pack_p h2pack, const DTYPE *x) { int n_thread = h2pack->n_thread; int max_child = h2pack->max_child; int n_leaf_node = h2pack->n_leaf_node; int max_level = h2pack->max_level; int min_adm_level = (h2pack->is_HSS) ? h2pack->HSS_min_adm_level : h2pack->min_adm_level; int *children = h2pack->children; int *n_child = h2pack->n_child; int *level_n_node = h2pack->level_n_node; int *level_nodes = h2pack->level_nodes; int *mat_cluster = h2pack->mat_cluster; H2P_thread_buf_p *thread_buf = h2pack->tb; H2P_matvec_init_y0(h2pack); H2P_dense_mat_p *y0 = h2pack->y0; H2P_dense_mat_p *U = h2pack->U; for (int i = max_level; i >= min_adm_level; i--) { int *level_i_nodes = level_nodes + i * n_leaf_node; int level_i_n_node = level_n_node[i]; int n_thread_i = MIN(level_i_n_node, n_thread); #pragma omp parallel num_threads(n_thread_i) { int tid = omp_get_thread_num(); thread_buf[tid]->timer = -get_wtime_sec(); #pragma omp for schedule(dynamic) nowait for (int j = 0; j < level_i_n_node; j++) { int node = level_i_nodes[j]; int n_child_node = n_child[node]; H2P_dense_mat_p U_node = U[node]; H2P_dense_mat_resize(y0[node], U_node->ncol, 1); if (n_child_node == 0) { // Leaf node, directly calculate U_j^T * x_j const DTYPE *x_spos = x + mat_cluster[node * 2]; CBLAS_GEMV( CblasRowMajor, CblasTrans, U_node->nrow, U_node->ncol, 1.0, U_node->data, U_node->ld, x_spos, 1, 0.0, y0[node]->data, 1 ); } else { // Non-leaf node, multiple U{node}^T with each child node y0 directly int *node_children = children + node * max_child; int U_srow = 0; for (int k = 
0; k < n_child_node; k++) { int child_k = node_children[k]; H2P_dense_mat_p y0_k = y0[child_k]; DTYPE *U_node_k = U_node->data + U_srow * U_node->ld; DTYPE beta = (k == 0) ? 0.0 : 1.0; CBLAS_GEMV( CblasRowMajor, CblasTrans, y0_k->nrow, U_node->ncol, 1.0, U_node_k, U_node->ld, y0_k->data, 1, beta, y0[node]->data, 1 ); U_srow += y0_k->nrow; } } // End of "if (n_child_node == 0)" } // End of j loop thread_buf[tid]->timer += get_wtime_sec(); } // End of "pragma omp parallel" if (h2pack->print_timers == 1) { double max_t = 0.0, avg_t = 0.0, min_t = 19241112.0; for (int i = 0; i < n_thread_i; i++) { double thread_i_timer = thread_buf[i]->timer; avg_t += thread_i_timer; max_t = MAX(max_t, thread_i_timer); min_t = MIN(min_t, thread_i_timer); } avg_t /= (double) n_thread_i; INFO_PRINTF("Matvec forward transformation: level %d, %d/%d threads, %d nodes\n", i, n_thread_i, n_thread, level_i_n_node); INFO_PRINTF(" min/avg/max thread wall-time = %.3lf, %.3lf, %.3lf (s)\n", min_t, avg_t, max_t); } } // End of i loop } // Transpose y0[i] from a npt*krnl_dim-by-1 vector (npt-by-krnl_dim // matrix) to a krnl_dim-by-npt matrix void H2P_transpose_y0_from_krnldim(H2Pack_p h2pack) { int n_node = h2pack->n_node; int n_thread = h2pack->n_thread; int krnl_dim = h2pack->krnl_dim; #pragma omp parallel num_threads(n_thread) { int tid = omp_get_thread_num(); H2P_dense_mat_p y0_tmp = h2pack->tb[tid]->mat0; #pragma omp for schedule(dynamic) for (int node = 0; node < n_node; node++) { H2P_dense_mat_p y0_node = h2pack->y0[node]; if (y0_node->ld == 0) continue; int y0_len = y0_node->nrow; int y0_npt = y0_len / krnl_dim; H2P_dense_mat_resize(y0_tmp, y0_len, 1); H2P_transpose_dmat(1, y0_npt, krnl_dim, y0_node->data, krnl_dim, y0_tmp->data, y0_npt); memcpy(y0_node->data, y0_tmp->data, sizeof(DTYPE) * y0_len); } } } // Transpose y1[i] from a krnl_dim-by-npt matrix to // a npt*krnl_dim-by-1 vector (npt-by-krnl_dim matrix) void H2P_transpose_y1_to_krnldim(H2Pack_p h2pack) { int n_node = h2pack->n_node; 
int n_thread = h2pack->n_thread;
    int krnl_dim = h2pack->krnl_dim;
    #pragma omp parallel num_threads(n_thread)
    {
        int tid = omp_get_thread_num();
        // Thread-local scratch buffer for the out-of-place transpose
        H2P_dense_mat_p y1_tmp = h2pack->tb[tid]->mat0;
        #pragma omp for schedule(dynamic)
        for (int node = 0; node < n_node; node++)
        {
            H2P_dense_mat_p y1_node = h2pack->y1[node];
            if (y1_node->ld == 0) continue;  // node not visited in this sweep
            int y1_len = y1_node->ncol;
            int y1_npt = y1_len / krnl_dim;
            H2P_dense_mat_resize(y1_tmp, y1_len, 1);
            H2P_transpose_dmat(1, krnl_dim, y1_npt, y1_node->data, y1_npt, y1_tmp->data, krnl_dim);
            memcpy(y1_node->data, y1_tmp->data, sizeof(DTYPE) * y1_len);
        }
    }
}

// Initialize auxiliary array y1 used in H2 matvec intermediate multiplication
// Each participating node gets an n_thread-by-U[i]->ncol buffer so every
// thread can accumulate into its own row without synchronization.
void H2P_matvec_init_y1(H2Pack_p h2pack)
{
    int n_node   = h2pack->n_node;
    int n_thread = h2pack->n_thread;
    // HSS mode sweeps the inadmissible pairs instead of the admissible ones
    int *node_n_r_adm = (h2pack->is_HSS == 1) ? h2pack->node_n_r_inadm : h2pack->node_n_r_adm;
    H2P_dense_mat_p *U = h2pack->U;
    // Lazily allocate the y1 array on the first matvec call
    if (h2pack->y1 == NULL)
    {
        h2pack->y1 = (H2P_dense_mat_p*) malloc(sizeof(H2P_dense_mat_p) * n_node);
        ASSERT_PRINTF(
            h2pack->y1 != NULL,
            "Failed to allocate %d H2P_dense_mat_t for H2 matvec buffer\n", n_node
        );
        for (int i = 0; i < n_node; i++)
            H2P_dense_mat_init(&h2pack->y1[i], 0, 0);
    }
    H2P_dense_mat_p *y1 = h2pack->y1;
    // Use ld to mark if y1[i] is visited in this intermediate sweep
    // The first U[i]->ncol elements in y1[i]->data will be used in downward sweep
    for (int i = 0; i < n_node; i++)
    {
        y1[i]->ld = 0;
        if (node_n_r_adm[i]) H2P_dense_mat_resize(y1[i], n_thread, U[i]->ncol);
    }
    // Each thread set its y1 buffer to 0 (NUMA first touch)
    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        for (int i = 0; i < n_node; i++)
        {
            if (y1[i]->ld == 0) continue;
            DTYPE *y1_i_thread = y1[i]->data + tid * y1[i]->ncol;
            memset(y1_i_thread, 0, sizeof(DTYPE) * y1[i]->ncol);
        }
    }
}

// Sum thread-local buffers to obtain final y1 results
void H2P_matvec_sum_y1_thread(H2Pack_p h2pack)
{
    int n_node   = h2pack->n_node;
    int n_thread = h2pack->n_thread;
    H2P_dense_mat_p *y1 = h2pack->y1;
H2P_thread_buf_p *thread_buf = h2pack->tb;
    #pragma omp parallel num_threads(n_thread)
    {
        int tid = omp_get_thread_num();
        // NOTE(review): "-=" (not "= -") — this accumulates on top of the timer
        // left by the intermediate sweep; confirm that is the intended total
        thread_buf[tid]->timer -= get_wtime_sec();
        #pragma omp for schedule(dynamic) nowait
        for (int i = 0; i < n_node; i++)
        {
            if (y1[i]->ld == 0) continue;  // node not visited in this sweep
            int ncol = y1[i]->ncol;
            // Reduce every thread's partial row (rows 1..n_thread-1) into row 0
            DTYPE *dst_row = y1[i]->data;
            for (int j = 1; j < n_thread; j++)
            {
                DTYPE *src_row = y1[i]->data + j * ncol;
                #pragma omp simd
                for (int k = 0; k < ncol; k++)
                    dst_row[k] += src_row[k];
            }
        }
        thread_buf[tid]->timer += get_wtime_sec();
    }
}

// Calculate H2 matvec intermediate multiplication task block on a thread
// Processes admissible pairs i in [B_blk->data[i_blk], B_blk->data[i_blk+1]),
// accumulating into the thread's y1 rows and (for leaf-vs-coarser pairs)
// directly into the thread-local output vector y.
void H2P_matvec_intmd_mult_AOT_task_block(
    H2Pack_p h2pack, const int tid, const int i_blk,
    const DTYPE *x, DTYPE *y
)
{
    int *r_adm_pairs = (h2pack->is_HSS) ? h2pack->HSS_r_adm_pairs : h2pack->r_adm_pairs;
    int *node_level  = h2pack->node_level;
    int *mat_cluster = h2pack->mat_cluster;
    int *B_nrow      = h2pack->B_nrow;
    int *B_ncol      = h2pack->B_ncol;
    size_t *B_ptr    = h2pack->B_ptr;
    DTYPE *B_data    = h2pack->B_data;
    H2P_int_vec_p B_blk = h2pack->B_blk;
    H2P_dense_mat_p *y0 = h2pack->y0;
    H2P_dense_mat_p *y1 = h2pack->y1;
    int B_blk_s = B_blk->data[i_blk];
    int B_blk_e = B_blk->data[i_blk + 1];
    for (int i = B_blk_s; i < B_blk_e; i++)
    {
        int node0  = r_adm_pairs[2 * i];
        int node1  = r_adm_pairs[2 * i + 1];
        int level0 = node_level[node0];
        int level1 = node_level[node1];
        DTYPE *Bi = B_data + B_ptr[i];  // precomputed (AOT) B_{ij} block
        int Bi_nrow = B_nrow[i];
        int Bi_ncol = B_ncol[i];
        // (1) Two nodes are of the same level, compress on both sides
        if (level0 == level1)
        {
            int ncol0 = y1[node0]->ncol;
            int ncol1 = y1[node1]->ncol;
            DTYPE *y1_dst_0 = y1[node0]->data + tid * ncol0;
            DTYPE *y1_dst_1 = y1[node1]->data + tid * ncol1;
            CBLAS_BI_GEMV(
                Bi_nrow, Bi_ncol, Bi, Bi_ncol,
                y0[node1]->data, y0[node0]->data, y1_dst_0, y1_dst_1
            );
        }
        // (2) node1 is a leaf node and its level is higher than node0's level,
        //     only compressed on node0's side, node1's side don't need the
        //     downward sweep and can directly accumulate result to output vector
        if
(level0 > level1)
        {
            int vec_s1 = mat_cluster[node1 * 2];
            DTYPE *y_spos = y + vec_s1;
            const DTYPE *x_spos = x + vec_s1;
            int ncol0 = y1[node0]->ncol;
            DTYPE *y1_dst_0 = y1[node0]->data + tid * ncol0;
            CBLAS_BI_GEMV(
                Bi_nrow, Bi_ncol, Bi, Bi_ncol,
                x_spos, y0[node0]->data, y1_dst_0, y_spos
            );
        }
        // (3) node0 is a leaf node and its level is higher than node1's level,
        //     only compressed on node1's side, node0's side don't need the
        //     downward sweep and can directly accumulate result to output vector
        if (level0 < level1)
        {
            int vec_s0 = mat_cluster[node0 * 2];
            DTYPE *y_spos = y + vec_s0;
            const DTYPE *x_spos = x + vec_s0;
            int ncol1 = y1[node1]->ncol;
            DTYPE *y1_dst_1 = y1[node1]->data + tid * ncol1;
            CBLAS_BI_GEMV(
                Bi_nrow, Bi_ncol, Bi, Bi_ncol,
                y0[node1]->data, x_spos, y_spos, y1_dst_1
            );
        }
    }  // End of i loop
}

// H2 matvec intermediate multiplication, calculate B_{ij} * (U_j^T * x_j)
// All B_{ij} matrices have been calculated and stored
void H2P_matvec_intmd_mult_AOT(H2Pack_p h2pack, const DTYPE *x)
{
    int n_thread = h2pack->n_thread;
    H2P_int_vec_p B_blk = h2pack->B_blk;
    H2P_thread_buf_p *thread_buf = h2pack->tb;
    // 1. Initialize y1
    H2P_matvec_init_y1(h2pack);
    // 2. Intermediate sweep
    // If (n_B_blk <= n_thread), B is constructed in H2Pack using a static workload
    // partitioning and NUMA first-touch optimization, we also use the same static
    // workload partitioning here for NUMA optimization. Otherwise, use OpenMP dynamic
    // scheduler for load balance.
const int n_B_blk = B_blk->length - 1;
    #pragma omp parallel num_threads(n_thread)
    {
        int tid = omp_get_thread_num();
        DTYPE *y = thread_buf[tid]->y;  // thread-local partial output vector
        thread_buf[tid]->timer = -get_wtime_sec();
        if (n_B_blk <= n_thread)
        {
            // Static partitioning: block i belongs to thread i (NUMA friendly)
            int i_blk = tid;
            if (i_blk < n_B_blk)
                H2P_matvec_intmd_mult_AOT_task_block(h2pack, tid, i_blk, x, y);
        } else {
            #pragma omp for schedule(dynamic) nowait
            for (int i_blk = 0; i_blk < n_B_blk; i_blk++)
                H2P_matvec_intmd_mult_AOT_task_block(h2pack, tid, i_blk, x, y);
        }
        thread_buf[tid]->timer += get_wtime_sec();
    }  // End of "pragma omp parallel"
    // 3. Sum thread-local buffers in y1
    H2P_matvec_sum_y1_thread(h2pack);
    if (h2pack->print_timers == 1)
    {
        double max_t = 0.0, avg_t = 0.0, min_t = 19241112.0;
        for (int i = 0; i < n_thread; i++)
        {
            double thread_i_timer = thread_buf[i]->timer;
            avg_t += thread_i_timer;
            max_t = MAX(max_t, thread_i_timer);
            min_t = MIN(min_t, thread_i_timer);
        }
        avg_t /= (double) n_thread;
        INFO_PRINTF("Matvec intermediate multiplication: min/avg/max thread wall-time = %.3lf, %.3lf, %.3lf (s)\n", min_t, avg_t, max_t);
    }
}

// Extend the number of points to a multiple of SIMD_LEN and perform an n-body bi-matvec
// Input parameters:
//   coord0     : Matrix, size dim-by-ld0, coordinates of the 1st point set
//   ld0        : Leading dimension of coord0, should be >= n0
//   n0         : Number of points in coord0 (each column in coord0 is a coordinate)
//   coord1     : Matrix, size dim-by-ld1, coordinates of the 2nd point set
//   ld1        : Leading dimension of coord1, should be >= n1
//   n1         : Number of points in coord1 (each column in coord0 is a coordinate)
//   x_in_0     : Matrix, size >= krnl_dim * n1, will be left multiplied by kernel_matrix(coord0, coord1)
//   x_in_1     : Matrix, size >= krnl_dim * n0, will be left multiplied by kernel_matrix(coord1, coord0)
//   ldi0, ldi1 : Leading dimensions of x_in_0 and x_in_1
//   ldo0, ldo1 : Leading dimensions of x_out_0 and x_out_1
//   xpt_dim    : Dimension of extended point coordinate
//   krnl_dim   : Dimension of tensor kernel's return
//   workbuf    : H2P_dense_mat data structure for allocating working buffer
//   krnl_param : Pointer to kernel function parameter array
//   krnl_bimv  : Pointer to kernel matrix bi-matvec function
// Output parameter:
//   x_out_0 : Matrix, size >= krnl_dim * n0, x_out_0 += kernel_matrix(coord0, coord1) * x_in_0
//   x_out_1 : Matrix, size >= krnl_dim * n1, x_out_1 += kernel_matrix(coord1, coord0) * x_in_1
// Note:
//   For x_{in,out}_{0,1}, they are not stored as the original (n{0,1} * krnl_dim)-by-1 column vector,
//   which can be viewed as n{0,1}-by-krnl_dim matrices. Instead, they are stored as krnl_dim-by-n{0,1}
//   matrices so the krnl_bimv can vectorize the load and store.
void H2P_ext_krnl_bimv(
    const DTYPE *coord0, const int ld0, const int n0,
    const DTYPE *coord1, const int ld1, const int n1,
    const DTYPE *x_in_0, const DTYPE *x_in_1, DTYPE *x_out_0, DTYPE *x_out_1,
    const int ldi0, const int ldi1, const int ldo0, const int ldo1,
    const int xpt_dim, const int krnl_dim, H2P_dense_mat_p workbuf,
    const void *krnl_param, kernel_bimv_fptr krnl_bimv
)
{
    // Round both point counts up to a multiple of SIMD_LEN
    int n0_ext  = (n0 + SIMD_LEN - 1) / SIMD_LEN * SIMD_LEN;
    int n1_ext  = (n1 + SIMD_LEN - 1) / SIMD_LEN * SIMD_LEN;
    int n01_ext = n0_ext + n1_ext;
    int buf_size = (xpt_dim + krnl_dim) * n01_ext * 2;
    H2P_dense_mat_resize(workbuf, 1, buf_size);
    // Partition the working buffer: padded coordinates, then padded in/out vectors
    DTYPE *trg_coord = workbuf->data;
    DTYPE *src_coord = trg_coord + xpt_dim * n0_ext;
    DTYPE *x_in_0_   = src_coord + xpt_dim * n1_ext;
    DTYPE *x_in_1_   = x_in_0_ + n1_ext * krnl_dim;
    DTYPE *x_out_0_  = x_in_1_ + n0_ext * krnl_dim;
    DTYPE *x_out_1_  = x_out_0_ + n0_ext * krnl_dim;
    // Copy coordinates and pad the extend part
    for (int i = 0; i < xpt_dim; i++)
    {
        const DTYPE *c0_src = coord0 + i * ld0;
        const DTYPE *c1_src = coord1 + i * ld1;
        DTYPE *c0_dst = trg_coord + i * n0_ext;
        DTYPE *c1_dst = src_coord + i * n1_ext;
        memcpy(c0_dst, c0_src, sizeof(DTYPE) * n0);
        memcpy(c1_dst, c1_src, sizeof(DTYPE) * n1);
        for (int j = n0; j < n0_ext; j++) c0_dst[j] = 0;
        for (int j = n1; j < n1_ext; j++) c1_dst[j] = 0;
    }
    // Copy input vectors and initialize output vectors
    // Must set the last n{0,1}_ext - n{0,1} elements in each row to 0,
    // otherwise tensor kernel results might be incorrect
    for (int i = 0; i < krnl_dim; i++)
    {
        const DTYPE *src = x_in_0 + i * ldi0;
        DTYPE *dst = x_in_0_ + i * n1_ext;
        memcpy(dst, src, sizeof(DTYPE) * n1);
        for (int j = n1; j < n1_ext; j++) dst[j] = 0;
    }
    memset(x_out_0_, 0, sizeof(DTYPE) * n0_ext * krnl_dim);
    for (int i = 0; i < krnl_dim; i++)
    {
        const DTYPE *src = x_in_1 + i * ldi1;
        DTYPE *dst = x_in_1_ + i * n0_ext;
        memcpy(dst, src, sizeof(DTYPE) * n0);
        for (int j = n0; j < n0_ext; j++) dst[j] = 0;
    }
    memset(x_out_1_, 0, sizeof(DTYPE) * n1_ext * krnl_dim);
    // Do the n-body bi-matvec
    krnl_bimv(
        trg_coord, n0_ext, n0_ext,
        src_coord, n1_ext, n1_ext,
        krnl_param, x_in_0_, x_in_1_, x_out_0_, x_out_1_
    );
    // Add results back to original output vectors (drop the padded tail)
    for (int i = 0; i < krnl_dim; i++)
    {
        DTYPE *dst = x_out_0 + i * ldo0;
        DTYPE *src = x_out_0_ + i * n0_ext;
        #pragma omp simd
        for (int j = 0; j < n0; j++) dst[j] += src[j];
    }
    for (int i = 0; i < krnl_dim; i++)
    {
        DTYPE *dst = x_out_1 + i * ldo1;
        DTYPE *src = x_out_1_ + i * n1_ext;
        #pragma omp simd
        for (int j = 0; j < n1; j++) dst[j] += src[j];
    }
}

// Evaluate a kernel matrix block, then perform a bi-matvec using this kernel matrix block
// Input parameters:
//   coord0      : Matrix, size dim-by-ld0, coordinates of the 1st point set
//   ld0         : Leading dimension of coord0, should be >= n0
//   n0          : Number of points in coord0 (each column in coord0 is a coordinate)
//   coord1      : Matrix, size dim-by-ld1, coordinates of the 2nd point set
//   ld1         : Leading dimension of coord1, should be >= n1
//   n1          : Number of points in coord1 (each column in coord0 is a coordinate)
//   x_in_0      : Vector, size >= n1 * krnl_dim, will be left multiplied by kernel_matrix(coord0, coord1)
//   x_in_1      : Vector, size >= n0 * krnl_dim, will be left multiplied by kernel_matrix(coord1, coord0)
//   krnl_dim    : Dimension of tensor kernel's return
//   npt_row_blk : Blocking size for coord0 points
//   krnl_param  : Pointer to kernel function parameter array
//   krnl_eval   : Pointer to kernel matrix evaluation function
// Output parameter:
//   x_out_0 : Vector, size >= n0 * krnl_dim, x_out_0 += kernel_matrix(coord0, coord1) * x_in_0
//   x_out_1 : Vector, size >= n1 * krnl_dim, x_out_1 += kernel_matrix(coord1, coord0) * x_in_1
void H2P_krnl_eval_bimv(
    const DTYPE *coord0, const int ld0, const int n0,
    const DTYPE *coord1, const int ld1, const int n1,
    const DTYPE *x_in_0, const DTYPE *x_in_1, DTYPE *x_out_0, DTYPE *x_out_1,
    const int krnl_dim, const int npt_row_blk, DTYPE *matbuf,
    const void *krnl_param, kernel_eval_fptr krnl_eval
)
{
    const int ldm = n1 * krnl_dim;
    // Evaluate the kernel block npt_row_blk coord0-points at a time so the
    // row panel stays in matbuf, then bi-matvec with that panel
    for (int blk_pt_s = 0; blk_pt_s < n0; blk_pt_s += npt_row_blk)
    {
        int blk_npt  = (blk_pt_s + npt_row_blk > n0) ? (n0 - blk_pt_s) : npt_row_blk;
        int blk_srow = blk_pt_s * krnl_dim;
        int blk_nrow = blk_npt * krnl_dim;
        krnl_eval(
            coord0 + blk_pt_s, ld0, blk_npt,
            coord1, ld1, n1,
            krnl_param, matbuf, ldm
        );
        CBLAS_BI_GEMV(
            blk_nrow, ldm, matbuf, ldm,
            x_in_0, x_in_1 + blk_srow, x_out_0 + blk_srow, x_out_1
        );
    }
}

// H2 matvec intermediate multiplication, calculate B_{ij} * (U_j^T * x_j)
// Need to calculate all B_{ij} matrices before using it
void H2P_matvec_intmd_mult_JIT(H2Pack_p h2pack, const DTYPE *x)
{
    int xpt_dim  = h2pack->xpt_dim;
    int krnl_dim = h2pack->krnl_dim;
    int n_point  = h2pack->n_point;
    int n_thread = h2pack->n_thread;
    int *r_adm_pairs = (h2pack->is_HSS) ?
h2pack->HSS_r_adm_pairs : h2pack->r_adm_pairs;
    int *node_level  = h2pack->node_level;
    int *pt_cluster  = h2pack->pt_cluster;
    int *mat_cluster = h2pack->mat_cluster;
    int *B_nrow      = h2pack->B_nrow;
    int *B_ncol      = h2pack->B_ncol;
    DTYPE *coord     = h2pack->coord;
    void *krnl_param = h2pack->krnl_param;
    H2P_int_vec_p B_blk     = h2pack->B_blk;
    H2P_dense_mat_p *y0     = h2pack->y0;
    H2P_dense_mat_p *J_coord = h2pack->J_coord;
    kernel_eval_fptr krnl_eval = h2pack->krnl_eval;
    kernel_bimv_fptr krnl_bimv = h2pack->krnl_bimv;
    H2P_thread_buf_p *thread_buf = h2pack->tb;
    // 1. Initialize y1
    H2P_matvec_init_y1(h2pack);
    H2P_dense_mat_p *y1 = h2pack->y1;
    // 2. Intermediate sweep
    const int n_B_blk = B_blk->length - 1;
    #pragma omp parallel num_threads(n_thread)
    {
        int tid = omp_get_thread_num();
        H2P_dense_mat_p Bi      = thread_buf[tid]->mat0;  // JIT kernel block buffer
        DTYPE *y                = thread_buf[tid]->y;     // thread-local partial output
        H2P_dense_mat_p workbuf = thread_buf[tid]->mat1;  // scratch for H2P_ext_krnl_bimv
        thread_buf[tid]->timer = -get_wtime_sec();
        #pragma omp for schedule(dynamic) nowait
        for (int i_blk = 0; i_blk < n_B_blk; i_blk++)
        {
            int B_blk_s = B_blk->data[i_blk];
            int B_blk_e = B_blk->data[i_blk + 1];
            for (int i = B_blk_s; i < B_blk_e; i++)
            {
                int node0  = r_adm_pairs[2 * i];
                int node1  = r_adm_pairs[2 * i + 1];
                int level0 = node_level[node0];
                int level1 = node_level[node1];
                int Bi_nrow = B_nrow[i];
                int Bi_ncol = B_ncol[i];
                // Size the JIT panel to roughly 128 KB, rounded to whole points
                int Bi_nrow_128KB = (128 * 1024) / (sizeof(DTYPE) * Bi_ncol);
                int Bi_blk_npt = Bi_nrow_128KB / krnl_dim;
                Bi_nrow_128KB = Bi_blk_npt * krnl_dim;
                H2P_dense_mat_resize(Bi, Bi_nrow_128KB, Bi_ncol);
                // (1) Two nodes are of the same level, compress on both sides
                if (level0 == level1)
                {
                    int ncol0 = y1[node0]->ncol;
                    int ncol1 = y1[node1]->ncol;
                    DTYPE *y1_dst_0 = y1[node0]->data + tid * ncol0;
                    DTYPE *y1_dst_1 = y1[node1]->data + tid * ncol1;
                    if (krnl_bimv != NULL)
                    {
                        int node0_npt = Bi_nrow / krnl_dim;
                        int node1_npt = Bi_ncol / krnl_dim;
                        H2P_ext_krnl_bimv(
                            J_coord[node0]->data, J_coord[node0]->ncol, J_coord[node0]->ncol,
                            J_coord[node1]->data, J_coord[node1]->ncol, J_coord[node1]->ncol,
                            y0[node1]->data, y0[node0]->data, y1_dst_0, y1_dst_1,
                            node1_npt, node0_npt, node0_npt, node1_npt,
                            xpt_dim, krnl_dim, workbuf, krnl_param, krnl_bimv
                        );
                    } else {
                        H2P_krnl_eval_bimv(
                            J_coord[node0]->data, J_coord[node0]->ncol, J_coord[node0]->ncol,
                            J_coord[node1]->data, J_coord[node1]->ncol, J_coord[node1]->ncol,
                            y0[node1]->data, y0[node0]->data, y1_dst_0, y1_dst_1,
                            krnl_dim, Bi_blk_npt, Bi->data, krnl_param, krnl_eval
                        );
                    }
                }
                // (2) node1 is a leaf node and its level is higher than node0's level,
                //     only compressed on node0's side, node1's side don't need the
                //     downward sweep and can directly accumulate result to output vector
                if (level0 > level1)
                {
                    int pt_s1 = pt_cluster[node1 * 2];
                    int node1_npt = pt_cluster[node1 * 2 + 1] - pt_s1 + 1;
                    int vec_s1 = mat_cluster[node1 * 2];
                    int ncol0 = y1[node0]->ncol;
                    DTYPE *y1_dst_0 = y1[node0]->data + tid * ncol0;
                    if (krnl_bimv != NULL)
                    {
                        // bimv path indexes x/y by point (krnl_dim-by-npt layout)
                        const DTYPE *x_spos = x + pt_s1;
                        DTYPE *y_spos = y + pt_s1;
                        int node0_npt = Bi_nrow / krnl_dim;
                        H2P_ext_krnl_bimv(
                            J_coord[node0]->data, J_coord[node0]->ncol, J_coord[node0]->ncol,
                            coord + pt_s1, n_point, node1_npt,
                            x_spos, y0[node0]->data, y1_dst_0, y_spos,
                            n_point, node0_npt, node0_npt, n_point,
                            xpt_dim, krnl_dim, workbuf, krnl_param, krnl_bimv
                        );
                    } else {
                        // eval path indexes x/y by matrix row
                        const DTYPE *x_spos = x + vec_s1;
                        DTYPE *y_spos = y + vec_s1;
                        H2P_krnl_eval_bimv(
                            J_coord[node0]->data, J_coord[node0]->ncol, J_coord[node0]->ncol,
                            coord + pt_s1, n_point, node1_npt,
                            x_spos, y0[node0]->data, y1_dst_0, y_spos,
                            krnl_dim, Bi_blk_npt, Bi->data, krnl_param, krnl_eval
                        );
                    }
                }
                // (3) node0 is a leaf node and its level is higher than node1's level,
                //     only compressed on node1's side, node0's side don't need the
                //     downward sweep and can directly accumulate result to output vector
                if (level0 < level1)
                {
                    int pt_s0 = pt_cluster[node0 * 2];
                    int node0_npt = pt_cluster[node0 * 2 + 1] - pt_s0 + 1;
                    int vec_s0 = mat_cluster[node0 * 2];
                    int ncol1 = y1[node1]->ncol;
                    DTYPE *y1_dst_1 = y1[node1]->data + tid * ncol1;
                    if
(krnl_bimv != NULL)
                    {
                        // bimv path indexes x/y by point (krnl_dim-by-npt layout)
                        const DTYPE *x_spos = x + pt_s0;
                        DTYPE *y_spos = y + pt_s0;
                        int node1_npt = Bi_ncol / krnl_dim;
                        H2P_ext_krnl_bimv(
                            coord + pt_s0, n_point, node0_npt,
                            J_coord[node1]->data, J_coord[node1]->ncol, J_coord[node1]->ncol,
                            y0[node1]->data, x_spos, y_spos, y1_dst_1,
                            node1_npt, n_point, n_point, node1_npt,
                            xpt_dim, krnl_dim, workbuf, krnl_param, krnl_bimv
                        );
                    } else {
                        // eval path indexes x/y by matrix row
                        const DTYPE *x_spos = x + vec_s0;
                        DTYPE *y_spos = y + vec_s0;
                        H2P_krnl_eval_bimv(
                            coord + pt_s0, n_point, node0_npt,
                            J_coord[node1]->data, J_coord[node1]->ncol, J_coord[node1]->ncol,
                            y0[node1]->data, x_spos, y_spos, y1_dst_1,
                            krnl_dim, Bi_blk_npt, Bi->data, krnl_param, krnl_eval
                        );
                    }
                }
            }  // End of i loop
        }  // End of i_blk loop
        thread_buf[tid]->timer += get_wtime_sec();
    }  // End of "pragma omp parallel"
    // 3. Sum thread-local buffers in y1
    H2P_matvec_sum_y1_thread(h2pack);
    if (h2pack->print_timers == 1)
    {
        double max_t = 0.0, avg_t = 0.0, min_t = 19241112.0;
        for (int i = 0; i < n_thread; i++)
        {
            double thread_i_timer = thread_buf[i]->timer;
            avg_t += thread_i_timer;
            max_t = MAX(max_t, thread_i_timer);
            min_t = MIN(min_t, thread_i_timer);
        }
        avg_t /= (double) n_thread;
        INFO_PRINTF("Matvec intermediate multiplication: min/avg/max thread wall-time = %.3lf, %.3lf, %.3lf (s)\n", min_t, avg_t, max_t);
    }
}

// H2 matvec backward transformation, calculate U_i * (B_{ij} * (U_j^T * x_j))
// Downward (top-down) sweep: each visited node expands its y1 with U and either
// accumulates into the output vector (leaf) or pushes the result to its children.
void H2P_matvec_bwd_transform(H2Pack_p h2pack, const DTYPE *x, DTYPE *y)
{
    int n_thread      = h2pack->n_thread;
    int max_child     = h2pack->max_child;
    int n_leaf_node   = h2pack->n_leaf_node;
    int max_level     = h2pack->max_level;
    int min_adm_level = (h2pack->is_HSS) ? h2pack->HSS_min_adm_level : h2pack->min_adm_level;
    int *children     = h2pack->children;
    int *n_child      = h2pack->n_child;
    int *level_n_node = h2pack->level_n_node;
    int *level_nodes  = h2pack->level_nodes;
    int *mat_cluster  = h2pack->mat_cluster;
    H2P_dense_mat_p *U  = h2pack->U;
    H2P_dense_mat_p *y1 = h2pack->y1;
    H2P_thread_buf_p *thread_buf = h2pack->tb;
    for (int i = min_adm_level; i <= max_level; i++)
    {
        int *level_i_nodes = level_nodes + i * n_leaf_node;
        int level_i_n_node = level_n_node[i];
        int n_thread_i = MIN(level_i_n_node, n_thread);
        #pragma omp parallel num_threads(n_thread_i)
        {
            int tid = omp_get_thread_num();
            H2P_dense_mat_p y1_tmp = thread_buf[tid]->mat0;  // U[node] * y1[node] scratch
            thread_buf[tid]->timer = -get_wtime_sec();
            #pragma omp for schedule(dynamic) nowait
            for (int j = 0; j < level_i_n_node; j++)
            {
                int node = level_i_nodes[j];
                int n_child_node = n_child[node];
                int *child_nodes = children + node * max_child;
                if (y1[node]->ld == 0) continue;  // node not touched this matvec
                H2P_dense_mat_resize(y1_tmp, U[node]->nrow, 1);
                CBLAS_GEMV(
                    CblasRowMajor, CblasNoTrans, U[node]->nrow, U[node]->ncol,
                    1.0, U[node]->data, U[node]->ld,
                    y1[node]->data, 1, 0.0, y1_tmp->data, 1
                );
                if (n_child_node == 0)
                {
                    // Leaf node, accumulate final results to output vector
                    int s_index = mat_cluster[2 * node];
                    int e_index = mat_cluster[2 * node + 1];
                    int n_point = e_index - s_index + 1;
                    DTYPE *y_spos = y + s_index;
                    #pragma omp simd
                    for (int k = 0; k < n_point; k++)
                        y_spos[k] += y1_tmp->data[k];
                } else {
                    // Non-leaf node, push down y1 values
                    int y1_tmp_idx = 0;
                    for (int k = 0; k < n_child_node; k++)
                    {
                        int child_k = child_nodes[k];
                        int child_k_len = U[child_k]->ncol;
                        DTYPE *y1_tmp_spos = y1_tmp->data + y1_tmp_idx;
                        if (y1[child_k]->ld == 0)
                        {
                            // Child had no y1 yet: create it from the pushed-down slice
                            H2P_dense_mat_resize(y1[child_k], child_k_len, 1);
                            memcpy(y1[child_k]->data, y1_tmp_spos, sizeof(DTYPE) * child_k_len);
                        } else {
                            #pragma omp simd
                            for (int l = 0; l < child_k_len; l++)
                                y1[child_k]->data[l] += y1_tmp_spos[l];
                        }
                        y1_tmp_idx += child_k_len;
                    }
                }  // End of "if (n_child_node == 0)"
            }  // End of j loop
            thread_buf[tid]->timer += get_wtime_sec();
        }  // End of "pragma omp parallel"
        if (h2pack->print_timers == 1)
        {
            double max_t = 0.0, avg_t = 0.0, min_t = 19241112.0;
            for (int i = 0; i < n_thread_i; i++)
            {
                double thread_i_timer = thread_buf[i]->timer;
                avg_t += thread_i_timer;
                max_t = MAX(max_t, thread_i_timer);
                min_t = MIN(min_t, thread_i_timer);
            }
            avg_t /= (double) n_thread_i;
            INFO_PRINTF("Matvec backward transformation: level %d, %d/%d threads, %d nodes\n", i, n_thread_i, n_thread, level_i_n_node);
            INFO_PRINTF(" min/avg/max thread wall-time = %.3lf, %.3lf, %.3lf (s)\n", min_t, avg_t, max_t);
        }  // End of "if (h2pack->print_timers == 1)"
    }  // End of i loop
}

// Calculate H2 matvec dense multiplication part 0 task block on a thread
// Part 0 = diagonal D blocks (one per leaf node): y_i += D_{ii} * x_i
void H2P_matvec_dense_mult0_AOT_task_block(
    H2Pack_p h2pack, const int tid, const int i_blk0,
    const DTYPE *x, DTYPE *y
)
{
    int *leaf_nodes  = h2pack->height_nodes;
    int *mat_cluster = h2pack->mat_cluster;
    int *D_nrow      = h2pack->D_nrow;
    int *D_ncol      = h2pack->D_ncol;
    size_t *D_ptr    = h2pack->D_ptr;
    DTYPE *D_data    = h2pack->D_data;
    H2P_int_vec_p D_blk0 = h2pack->D_blk0;
    int D_blk0_s = D_blk0->data[i_blk0];
    int D_blk0_e = D_blk0->data[i_blk0 + 1];
    for (int i = D_blk0_s; i < D_blk0_e; i++)
    {
        int node  = leaf_nodes[i];
        int vec_s = mat_cluster[node * 2];
        DTYPE *y_spos = y + vec_s;
        const DTYPE *x_spos = x + vec_s;
        DTYPE *Di = D_data + D_ptr[i];
        int Di_nrow = D_nrow[i];
        int Di_ncol = D_ncol[i];
        CBLAS_GEMV(
            CblasRowMajor, CblasNoTrans, Di_nrow, Di_ncol,
            1.0, Di, Di_ncol,
            x_spos, 1, 1.0, y_spos, 1
        );
    }
}

// Calculate H2 matvec dense multiplication part 1 task block on a thread
// Part 1 = off-diagonal D blocks from inadmissible node pairs
void H2P_matvec_dense_mult1_AOT_task_block(
    H2Pack_p h2pack, const int tid, const int i_blk1,
    const DTYPE *x, DTYPE *y
)
{
    int n_leaf_node = h2pack->n_leaf_node;
    int *r_inadm_pairs = (h2pack->is_HSS) ?
h2pack->HSS_r_inadm_pairs : h2pack->r_inadm_pairs;
    int *mat_cluster = h2pack->mat_cluster;
    int *D_nrow      = h2pack->D_nrow;
    int *D_ncol      = h2pack->D_ncol;
    size_t *D_ptr    = h2pack->D_ptr;
    DTYPE *D_data    = h2pack->D_data;
    H2P_int_vec_p D_blk1 = h2pack->D_blk1;
    int D_blk1_s = D_blk1->data[i_blk1];
    int D_blk1_e = D_blk1->data[i_blk1 + 1];
    for (int i = D_blk1_s; i < D_blk1_e; i++)
    {
        int node0  = r_inadm_pairs[2 * i];
        int node1  = r_inadm_pairs[2 * i + 1];
        int vec_s0 = mat_cluster[2 * node0];
        int vec_s1 = mat_cluster[2 * node1];
        DTYPE *y_spos0 = y + vec_s0;
        DTYPE *y_spos1 = y + vec_s1;
        const DTYPE *x_spos0 = x + vec_s0;
        const DTYPE *x_spos1 = x + vec_s1;
        // Off-diagonal blocks are stored after the n_leaf_node diagonal blocks
        DTYPE *Di = D_data + D_ptr[n_leaf_node + i];
        int Di_nrow = D_nrow[n_leaf_node + i];
        int Di_ncol = D_ncol[n_leaf_node + i];
        // One pass handles both D_{01} * x_1 -> y_0 and D_{01}^T * x_0 -> y_1
        CBLAS_BI_GEMV(
            Di_nrow, Di_ncol, Di, Di_ncol,
            x_spos1, x_spos0, y_spos0, y_spos1
        );
    }
}

// H2 matvec dense multiplication, calculate D_{ij} * x_j
// All D_{ij} matrices have been calculated and stored
void H2P_matvec_dense_mult_AOT(H2Pack_p h2pack, const DTYPE *x)
{
    int n_thread = h2pack->n_thread;
    H2P_int_vec_p D_blk0 = h2pack->D_blk0;
    H2P_int_vec_p D_blk1 = h2pack->D_blk1;
    H2P_thread_buf_p *thread_buf = h2pack->tb;
    // If (n_D0_blk <= n_thread) or (n_D1_blk <= n_thread), D is constructed in
    // H2Pack using a static workload partitioning and NUMA first-touch optimization,
    // we also use the same static workload partitioning here for NUMA optimization.
    // Otherwise, use OpenMP dynamic scheduler for load balance.
    const int n_D0_blk = D_blk0->length - 1;
    const int n_D1_blk = D_blk1->length - 1;
    #pragma omp parallel num_threads(n_thread)
    {
        int tid = omp_get_thread_num();
        DTYPE *y = thread_buf[tid]->y;  // thread-local partial output vector
        thread_buf[tid]->timer = -get_wtime_sec();
        // 1. Diagonal blocks matvec
        if (n_D0_blk <= n_thread)
        {
            int i_blk0 = tid;
            if (i_blk0 < n_D0_blk)
                H2P_matvec_dense_mult0_AOT_task_block(h2pack, tid, i_blk0, x, y);
        } else {
            #pragma omp for schedule(dynamic) nowait
            for (int i_blk0 = 0; i_blk0 < n_D0_blk; i_blk0++)
                H2P_matvec_dense_mult0_AOT_task_block(h2pack, tid, i_blk0, x, y);
        }  // End of "if (n_D0_blk-1 <= n_thread)"
        // 2. Off-diagonal blocks from inadmissible pairs matvec
        if (n_D1_blk <= n_thread)
        {
            int i_blk1 = tid;
            if (i_blk1 < n_D1_blk)
                H2P_matvec_dense_mult1_AOT_task_block(h2pack, tid, i_blk1, x, y);
        } else {
            #pragma omp for schedule(dynamic) nowait
            for (int i_blk1 = 0; i_blk1 < n_D1_blk; i_blk1++)
                H2P_matvec_dense_mult1_AOT_task_block(h2pack, tid, i_blk1, x, y);
        }  // End of "if (n_D1_blk-1 <= n_thread)"
        thread_buf[tid]->timer += get_wtime_sec();
    }  // End of "pragma omp parallel"
    if (h2pack->print_timers == 1)
    {
        double max_t = 0.0, avg_t = 0.0, min_t = 19241112.0;
        for (int i = 0; i < n_thread; i++)
        {
            double thread_i_timer = thread_buf[i]->timer;
            avg_t += thread_i_timer;
            max_t = MAX(max_t, thread_i_timer);
            min_t = MIN(min_t, thread_i_timer);
        }
        avg_t /= (double) n_thread;
        INFO_PRINTF("Matvec dense multiplication: min/avg/max thread wall-time = %.3lf, %.3lf, %.3lf (s)\n", min_t, avg_t, max_t);
    }
}

// H2 matvec dense multiplication, calculate D_{ij} * x_j
// Need to calculate all D_{ij} matrices before using it
void H2P_matvec_dense_mult_JIT(H2Pack_p h2pack, const DTYPE *x)
{
    int n_thread    = h2pack->n_thread;
    int xpt_dim     = h2pack->xpt_dim;
    int krnl_dim    = h2pack->krnl_dim;
    int n_point     = h2pack->n_point;
    int n_leaf_node = h2pack->n_leaf_node;
    int *r_inadm_pairs = (h2pack->is_HSS) ?
h2pack->HSS_r_inadm_pairs : h2pack->r_inadm_pairs;
    int *leaf_nodes  = h2pack->height_nodes;
    int *pt_cluster  = h2pack->pt_cluster;
    int *mat_cluster = h2pack->mat_cluster;
    int *D_ncol      = h2pack->D_ncol;
    DTYPE *coord     = h2pack->coord;
    void *krnl_param = h2pack->krnl_param;
    H2P_int_vec_p D_blk0 = h2pack->D_blk0;
    H2P_int_vec_p D_blk1 = h2pack->D_blk1;
    kernel_eval_fptr krnl_eval = h2pack->krnl_eval;
    kernel_bimv_fptr krnl_bimv = h2pack->krnl_bimv;
    H2P_thread_buf_p *thread_buf = h2pack->tb;
    const int n_D0_blk = D_blk0->length - 1;
    const int n_D1_blk = D_blk1->length - 1;
    #pragma omp parallel num_threads(n_thread)
    {
        int tid = omp_get_thread_num();
        // NOTE(review): Di and tmp both alias thread_buf[tid]->mat0, so the JIT
        // kernel panel and the discarded x_out_1 buffer share storage in the
        // krnl_eval path below — verify this aliasing is intentional
        H2P_dense_mat_p Di  = thread_buf[tid]->mat0;
        H2P_dense_mat_p tmp = thread_buf[tid]->mat0;
        DTYPE *y = thread_buf[tid]->y;                    // thread-local partial output
        H2P_dense_mat_p workbuf = thread_buf[tid]->mat1;  // scratch for H2P_ext_krnl_bimv
        thread_buf[tid]->timer = -get_wtime_sec();
        // 1. Diagonal blocks matvec
        #pragma omp for schedule(dynamic) nowait
        for (int i_blk0 = 0; i_blk0 < n_D0_blk; i_blk0++)
        {
            int D_blk0_s = D_blk0->data[i_blk0];
            int D_blk0_e = D_blk0->data[i_blk0 + 1];
            for (int i = D_blk0_s; i < D_blk0_e; i++)
            {
                int node  = leaf_nodes[i];
                int pt_s  = pt_cluster[node * 2];
                int vec_s = mat_cluster[node * 2];
                int node_npt = pt_cluster[node * 2 + 1] - pt_s + 1;
                H2P_dense_mat_resize(tmp, node_npt * krnl_dim, 1);
                // Discard x_out_1 stored in tmp->data
                if (krnl_bimv != NULL)
                {
                    DTYPE *y_spos = y + pt_s;
                    const DTYPE *x_spos = x + pt_s;
                    H2P_ext_krnl_bimv(
                        coord + pt_s, n_point, node_npt,
                        coord + pt_s, n_point, node_npt,
                        x_spos, x_spos, y_spos, tmp->data,
                        n_point, 0, n_point, 0,  // ldi1 and ldo1 need to be 0 here!
                        xpt_dim, krnl_dim, workbuf, krnl_param, krnl_bimv
                    );
                } else {
                    DTYPE *y_spos = y + vec_s;
                    const DTYPE *x_spos = x + vec_s;
                    int Di_ncol = D_ncol[i];
                    // Size the JIT panel to roughly 128 KB, rounded to whole points
                    int Di_nrow_128KB = (128 * 1024) / (sizeof(DTYPE) * Di_ncol);
                    int Di_blk_npt = Di_nrow_128KB / krnl_dim;
                    Di_nrow_128KB = Di_blk_npt * krnl_dim;
                    H2P_dense_mat_resize(Di, Di_nrow_128KB, Di_ncol);
                    H2P_krnl_eval_bimv(
                        coord + pt_s, n_point, node_npt,
                        coord + pt_s, n_point, node_npt,
                        x_spos, x_spos, y_spos, tmp->data,
                        krnl_dim, Di_blk_npt, Di->data, krnl_param, krnl_eval
                    );
                }
            }
        }  // End of i_blk0 loop
        // 2. Off-diagonal blocks from inadmissible pairs matvec
        #pragma omp for schedule(dynamic) nowait
        for (int i_blk1 = 0; i_blk1 < n_D1_blk; i_blk1++)
        {
            int D_blk1_s = D_blk1->data[i_blk1];
            int D_blk1_e = D_blk1->data[i_blk1 + 1];
            for (int i = D_blk1_s; i < D_blk1_e; i++)
            {
                int node0 = r_inadm_pairs[2 * i];
                int node1 = r_inadm_pairs[2 * i + 1];
                int pt_s0 = pt_cluster[2 * node0];
                int pt_s1 = pt_cluster[2 * node1];
                int vec_s0 = mat_cluster[2 * node0];
                int vec_s1 = mat_cluster[2 * node1];
                int node0_npt = pt_cluster[2 * node0 + 1] - pt_s0 + 1;
                int node1_npt = pt_cluster[2 * node1 + 1] - pt_s1 + 1;
                if (krnl_bimv != NULL)
                {
                    // bimv path indexes x/y by point (krnl_dim-by-npt layout)
                    DTYPE *y_spos0 = y + pt_s0;
                    DTYPE *y_spos1 = y + pt_s1;
                    const DTYPE *x_spos0 = x + pt_s0;
                    const DTYPE *x_spos1 = x + pt_s1;
                    H2P_ext_krnl_bimv(
                        coord + pt_s0, n_point, node0_npt,
                        coord + pt_s1, n_point, node1_npt,
                        x_spos1, x_spos0, y_spos0, y_spos1,
                        n_point, n_point, n_point, n_point,
                        xpt_dim, krnl_dim, workbuf, krnl_param, krnl_bimv
                    );
                } else {
                    // eval path indexes x/y by matrix row
                    DTYPE *y_spos0 = y + vec_s0;
                    DTYPE *y_spos1 = y + vec_s1;
                    const DTYPE *x_spos0 = x + vec_s0;
                    const DTYPE *x_spos1 = x + vec_s1;
                    int Di_ncol = D_ncol[n_leaf_node + i];
                    int Di_nrow_128KB = (128 * 1024) / (sizeof(DTYPE) * Di_ncol);
                    int Di_blk_npt = Di_nrow_128KB / krnl_dim;
                    Di_nrow_128KB = Di_blk_npt * krnl_dim;
                    H2P_dense_mat_resize(Di, Di_nrow_128KB, Di_ncol);
                    H2P_krnl_eval_bimv(
                        coord + pt_s0, n_point, node0_npt,
                        coord + pt_s1, n_point, node1_npt,
                        x_spos1, x_spos0, y_spos0,
y_spos1,
                        krnl_dim, Di_blk_npt, Di->data, krnl_param, krnl_eval
                    );
                }
            }
        }  // End of i_blk1 loop
        thread_buf[tid]->timer += get_wtime_sec();
    }  // End of "pragma omp parallel"
    if (h2pack->print_timers == 1)
    {
        double max_t = 0.0, avg_t = 0.0, min_t = 19241112.0;
        for (int i = 0; i < n_thread; i++)
        {
            double thread_i_timer = thread_buf[i]->timer;
            avg_t += thread_i_timer;
            max_t = MAX(max_t, thread_i_timer);
            min_t = MIN(min_t, thread_i_timer);
        }
        avg_t /= (double) n_thread;
        INFO_PRINTF("Matvec dense multiplication: min/avg/max thread wall-time = %.3lf, %.3lf, %.3lf (s)\n", min_t, avg_t, max_t);
    }
}

// Permute the multiplicand vector from the original point ordering to the
// sorted point ordering inside H2Pack
void H2P_permute_vector_forward(H2Pack_p h2pack, const DTYPE *x, DTYPE *pmt_x)
{
    gather_vector_elements(sizeof(DTYPE), h2pack->krnl_mat_size, h2pack->fwd_pmt_idx, x, pmt_x);
}

// Permute the output vector from the sorted point ordering inside H2Pack
// to the original point ordering
void H2P_permute_vector_backward(H2Pack_p h2pack, const DTYPE *x, DTYPE *pmt_x)
{
    gather_vector_elements(sizeof(DTYPE), h2pack->krnl_mat_size, h2pack->bwd_pmt_idx, x, pmt_x);
}

// H2 representation multiplies a column vector
// Pipeline: forward permute -> forward transform -> intermediate multiply ->
// backward transform -> dense multiply -> thread reduction -> backward permute.
void H2P_matvec(H2Pack_p h2pack, const DTYPE *x, DTYPE *y)
{
    double st, et;
    int krnl_mat_size = h2pack->krnl_mat_size;
    int n_thread      = h2pack->n_thread;
    int BD_JIT        = h2pack->BD_JIT;
    int krnl_dim      = h2pack->krnl_dim;
    int n_point       = h2pack->n_point;
    // Transposed (krnl_dim-by-npt) vector layout is needed only for the
    // JIT bi-matvec path with a tensor (krnl_dim > 1) kernel
    int need_trans = ((h2pack->krnl_bimv != NULL) && (BD_JIT == 1) && (krnl_dim > 1));
    DTYPE *xT     = h2pack->xT;
    DTYPE *yT     = h2pack->yT;
    DTYPE *pmt_x  = h2pack->pmt_x;
    DTYPE *pmt_y  = h2pack->pmt_y;
    double *timers   = h2pack->timers;
    size_t *mat_size = h2pack->mat_size;
    H2P_thread_buf_p *thread_buf = h2pack->tb;
    DTYPE *x_ = need_trans ? xT : pmt_x;
    DTYPE *y_ = need_trans ? yT : pmt_y;
    // 1. Forward permute the input vector
    st = get_wtime_sec();
    H2P_permute_vector_forward(h2pack, x, pmt_x);
    et = get_wtime_sec();
    timers[MV_VOP_TIMER_IDX] += et - st;
    mat_size[MV_VOP_SIZE_IDX] += 2 * krnl_mat_size;
    // 2. Reset partial y result in each thread-local buffer to 0
    st = get_wtime_sec();
    #pragma omp parallel num_threads(n_thread)
    {
        int tid = omp_get_thread_num();
        DTYPE *tid_y = thread_buf[tid]->y;
        memset(tid_y, 0, sizeof(DTYPE) * krnl_mat_size);
        #pragma omp for
        for (int i = 0; i < krnl_mat_size; i++)
        {
            pmt_y[i] = 0;
            yT[i] = 0;
        }
    }
    mat_size[MV_VOP_SIZE_IDX] += (2 + n_thread) * krnl_mat_size;
    if (need_trans)
    {
        H2P_transpose_dmat(n_thread, n_point, krnl_dim, pmt_x, krnl_dim, xT, n_point);
        mat_size[MV_VOP_SIZE_IDX] += 2 * krnl_mat_size;
    }
    et = get_wtime_sec();
    timers[MV_VOP_TIMER_IDX] += et - st;
    // 3. Forward transformation, calculate U_j^T * x_j
    st = get_wtime_sec();
    H2P_matvec_fwd_transform(h2pack, pmt_x);
    et = get_wtime_sec();
    timers[MV_FWD_TIMER_IDX] += et - st;
    // 4. Intermediate multiplication, calculate B_{ij} * (U_j^T * x_j)
    st = get_wtime_sec();
    if (BD_JIT == 1)
    {
        if (need_trans) H2P_transpose_y0_from_krnldim(h2pack);
        H2P_matvec_intmd_mult_JIT(h2pack, x_);
        if (need_trans) H2P_transpose_y1_to_krnldim(h2pack);
    } else {
        H2P_matvec_intmd_mult_AOT(h2pack, pmt_x);
    }
    et = get_wtime_sec();
    timers[MV_MID_TIMER_IDX] += et - st;
    // 5. Backward transformation, calculate U_i * (B_{ij} * (U_j^T * x_j))
    st = get_wtime_sec();
    H2P_matvec_bwd_transform(h2pack, pmt_x, pmt_y);
    et = get_wtime_sec();
    timers[MV_BWD_TIMER_IDX] += et - st;
    // 6. Dense multiplication, calculate D_i * x_i
    st = get_wtime_sec();
    if (BD_JIT == 1)
    {
        H2P_matvec_dense_mult_JIT(h2pack, x_);
    } else {
        H2P_matvec_dense_mult_AOT(h2pack, pmt_x);
    }
    et = get_wtime_sec();
    timers[MV_DEN_TIMER_IDX] += et - st;
    // 7. Reduce sum partial y results
    st = get_wtime_sec();
    #pragma omp parallel num_threads(n_thread)
    {
        int tid = omp_get_thread_num();
        int blk_spos, blk_len;
        calc_block_spos_len(krnl_mat_size, n_thread, tid, &blk_spos, &blk_len);
        // NOTE(review): the inner loop variable shadows the outer tid; the outer
        // tid is only needed above for blk_spos/blk_len, so this is harmless,
        // but renaming the inner index would be clearer
        for (int tid = 0; tid < n_thread; tid++)
        {
            DTYPE *y_src = thread_buf[tid]->y;
            #pragma omp simd
            for (int i = blk_spos; i < blk_spos + blk_len; i++)
                y_[i] += y_src[i];
        }
    }
    mat_size[MV_VOP_SIZE_IDX] += (2 * n_thread + 1) * krnl_mat_size;
    // We use xT here to hold the transpose of yT
    if (need_trans)
    {
        H2P_transpose_dmat(n_thread, krnl_dim, n_point, yT, n_point, xT, krnl_dim);
        #pragma omp parallel for simd
        for (int i = 0; i < krnl_mat_size; i++)
            pmt_y[i] += xT[i];
        mat_size[MV_VOP_SIZE_IDX] += 4 * krnl_mat_size;
    }
    et = get_wtime_sec();
    timers[MV_VOP_TIMER_IDX] += et - st;
    // 8. Backward permute the output vector
    st = get_wtime_sec();
    H2P_permute_vector_backward(h2pack, pmt_y, y);
    et = get_wtime_sec();
    timers[MV_VOP_TIMER_IDX] += et - st;
    //mat_size[_MV_VOP_SIZE_IDX] += 2 * krnl_mat_size;
    h2pack->n_matvec++;
}
GB_unop__identity_bool_uint32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): this translation unit is machine-generated from a template;
// changes belong in the generator, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_bool_uint32
// op(A') function:  GB_unop_tran__identity_bool_uint32

// C type:   bool
// A type:   uint32_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    bool z = (bool) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    bool z = (bool) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the identity operator elementwise with a uint32_t -> bool cast
// (any nonzero value becomes true). Embarrassingly parallel: each output
// entry depends only on the same input entry, so a static OpenMP schedule
// is safe even when Cx aliases Ax.
GrB_Info GB_unop_apply__identity_bool_uint32
(
    bool *Cx,               // Cx and Ax may be aliased
    const uint32_t *Ax,
    int64_t anz,            // number of entries to apply the op to
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint32_t aij = Ax [p] ;
        bool z = (bool) aij ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in the shared template GB_unop_transpose.c,
// which is instantiated here via the GB_* macros defined above.
GrB_Info GB_unop_tran__identity_bool_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pado_unw_unv_para.201912291420.debug_wrong_label_size.h
/*
 * pado.h
 *
 *  Created on: Sep 4, 2018
 *      Author: Zhen Peng
 */

#ifndef INCLUDES_PADO_UNW_PARA_UNV_H_
#define INCLUDES_PADO_UNW_PARA_UNV_H_

#include <vector>
#include <unordered_map>
#include <map>
#include <algorithm>
#include <iostream>
#include <limits.h>
#include <xmmintrin.h>
#include <bitset>
#include <cmath>
#include "globals.h"
#include "graph.h"
#include <omp.h>

using std::vector;
using std::unordered_map;
using std::map;
using std::bitset;
using std::stable_sort;
using std::min;
using std::fill;

namespace PADO {

//inti NUM_THREADS = 4;
//const inti BATCH_SIZE = 1024; // The size for regular batch and bit array.
//const inti BITPARALLEL_SIZE = 50;
//const inti THRESHOLD_PARALLEL = 80;

//// Batch based processing, 09/11/2018
// Parallel, batch-based vertex-centric Pruned Landmark Labeling (PLL) for
// unweighted graphs. Roots are processed BATCH_SIZE at a time; the first
// BITPARALLEL_SIZE high-rank roots additionally get a bit-parallel BFS whose
// results (bp_dist / bp_sets) serve as a cheap pruning test during labeling.
// NOTE(review): the scalar types idi/inti/smalli/weighti and CAS() come from
// globals.h, which is not visible here — semantics assumed, confirm there.
template<inti BATCH_SIZE = 1024>
class ParaVertexCentricPLL {
private:
    static const inti BITPARALLEL_SIZE = 50;
    idi num_v_ = 0;
    // Work-size threshold below which loops run sequentially instead of
    // paying the OpenMP fork/join overhead.
    const inti THRESHOLD_PARALLEL = 80;

    // Structure for the type of label
    struct IndexType {
        struct Batch {
            idi batch_id; // Batch ID
            idi start_index; // Index to the array distances where the batch starts
            inti size; // Number of distances element in this batch

            Batch(idi batch_id_, idi start_index_, inti size_):
                    batch_id(batch_id_), start_index(start_index_), size(size_)
            {
                ;
            }
        };

        struct DistanceIndexType {
            idi start_index; // Index to the array vertices where the same-distance vertices start
            inti size; // Number of the same-distance vertices
            smalli dist; // The real distance

            DistanceIndexType(idi start_index_, inti size_, smalli dist_):
                    start_index(start_index_), size(size_), dist(dist_)
            {
                ;
            }
        };

        smalli bp_dist[BITPARALLEL_SIZE];
        uint64_t bp_sets[BITPARALLEL_SIZE][2]; // [0]: S^{-1}, [1]: S^{0}

        vector<Batch> batches; // Batch info
        vector<DistanceIndexType> distances; // Distance info
        vector<idi> vertices; // Vertices in the label, represented as temporary (in-batch) ID
    }; //__attribute__((aligned(64)));

    // Structure for the type of temporary label
    struct ShortIndex {
        // I use BATCH_SIZE + 1 bits for the indicator bit array.
        // The v.indicator[BATCH_SIZE] is set if in the current batch v has got any new labels already.
        // In this way, when doing initialization, only initialize those short_index[v] whose indicator[BATCH_SIZE] is set.
        bitset<BATCH_SIZE + 1> indicator; // Global indicator, indicator[r] (0 <= r < BATCH_SIZE) is set means root r was once selected as candidate already

        // Use a queue to store candidates
        vector<inti> candidates_que = vector<inti>(BATCH_SIZE);
        inti end_candidates_que = 0;
        vector<uint8_t> is_candidate = vector<uint8_t>(BATCH_SIZE, 0);
    }; //__attribute__((aligned(64)));

    // Structure of the public ordered index for distance queries.
    struct IndexOrdered {
        weighti bp_dist[BITPARALLEL_SIZE];
        uint64_t bp_sets[BITPARALLEL_SIZE][2]; // [0]: S^{-1}, [1]: S^{0}

        vector<idi> label_id;
        vector<weighti> label_dists;
    };

    vector<IndexType> L;
    vector<IndexOrdered> Index; // Ordered labels for original vertex ID

    // Builds the whole 2-hop label index for G (driver; defined elsewhere).
    void construct(const Graph &G);
    // Runs up to BITPARALLEL_SIZE bit-parallel BFSs, filling every vertex's
    // bp_dist/bp_sets and marking consumed roots in used_bp_roots.
    inline void bit_parallel_labeling(
            const Graph &G,
            vector<IndexType> &L,
            vector<uint8_t> &used_bp_roots);
//  inline void bit_parallel_labeling(
//          const Graph &G,
//          vector<IndexType> &L,
//          vector<bool> &used_bp_roots);
    // Processes one batch of roots [roots_start, roots_start + roots_size).
    inline void batch_process(
            const Graph &G,
            idi b_id,
            idi roots_start, // start id of roots
            inti roots_size, // how many roots in the batch
            vector<IndexType> &L,
            const vector<uint8_t> &used_bp_roots,
            vector<idi> &active_queue,
            idi &end_active_queue,
            vector<idi> &candidate_queue,
            idi &end_candidate_queue,
            vector<ShortIndex> &short_index,
            vector<vector<smalli> > &dist_matrix,
            vector<uint8_t> &got_candidates,
            vector<uint8_t> &is_active,
            vector<idi> &once_candidated_queue,
            idi &end_once_candidated_queue,
            vector<uint8_t> &once_candidated);
//  NOTE(review): an older batch_process declaration taking vector<bool>
//  flags was kept here commented out; trimmed for readability.
    inline void initialize(
            vector<ShortIndex> &short_index,
            vector<vector<smalli> > &dist_matrix,
            vector<idi> &active_queue,
            idi &end_active_queue,
            vector<idi> &once_candidated_queue,
            idi &end_once_candidated_queue,
//          vector<bool> &once_candidated,
            vector<uint8_t> &once_candidated,
            idi b_id,
            idi roots_start,
            inti roots_size,
            vector<IndexType> &L,
            const vector<uint8_t> &used_bp_roots);
    // NOTE(review): the definition of push_labels names the queue parameters
    // tmp_once_candidated_queue / size_tmp_once_candidated_queue; only the
    // names differ from this declaration, the types match.
    inline void push_labels(
            idi v_head,
            idi roots_start,
            const Graph &G,
            const vector<IndexType> &L,
            vector<ShortIndex> &short_index,
//          vector<idi> &candidate_queue,
//          idi &end_candidate_queue,
            vector<idi> &tmp_candidate_queue,
            idi &size_tmp_candidate_queue,
            const idi offset_tmp_queue,
//          idi &offset_tmp_candidate_queue,
//          vector<bool> &got_candidates,
            vector<uint8_t> &got_candidates,
            vector<idi> &once_candidated_queue,
            idi &end_once_candidated_queue,
//          vector<bool> &once_candidated,
            vector<uint8_t> &once_candidated,
            const vector<uint8_t> &used_bp_roots,
            smalli iter);
    inline bool distance_query(
            idi cand_root_id,
            idi v_id,
            idi roots_start,
            const vector<IndexType> &L,
            const vector<vector<smalli> > &dist_matrix,
            smalli iter);
    inline void insert_label_only(
            idi cand_root_id,
            idi v_id,
            idi roots_start,
            inti roots_size,
            vector<IndexType> &L,
            vector<vector<smalli> > &dist_matrix,
            smalli iter);
    inline void update_label_indices(
            idi v_id,
            idi inserted_count,
            vector<IndexType> &L,
            vector<ShortIndex> &short_index,
            idi b_id,
            smalli iter);
    inline void reset_at_end(
            idi roots_start,
            inti roots_size,
            vector<IndexType> &L,
            vector<vector<smalli> > &dist_matrix);

    // Some parallel interfaces
    inline idi prefix_sum_for_offsets(
            vector<idi> &offsets);
    template<typename T>
    inline void collect_into_queue(
            vector<T> &tmp_queue,
            vector<idi> &offsets_tmp_queue, // the locations in tmp_queue for writing from tmp_queue
            vector<idi> &offsets_queue, // the locations in queue for writing into queue.
            idi num_elements, // total number of elements which need to be added from tmp_queue to queue
            vector<T> &queue,
            idi &end_queue);
    template<typename T, typename Int>
    inline void TS_enqueue(
            vector<T> &queue,
            Int &end_queue,
            const T &e);

    // Test only
    // NOTE(review): a block of commented-out profiling counters/timers
    // (hit counts, per-phase wall times, per-thread adding times, L2 cache
    // miss probe) lived here; trimmed for readability — see version control.
    // End test

public:
    ParaVertexCentricPLL() = default;
    ParaVertexCentricPLL(const Graph &G);

    weighti query(
            idi u,
            idi v);

    void print();
    void switch_labels_to_old_id(
            const vector<idi> &rank2id,
            const vector<idi> &rank);
    void store_index_to_file(
            const char *filename,
            const vector<idi> &rank);
    void load_index_from_file(
            const char *filename);
    void order_labels(
            const vector<idi> &rank2id,
            const vector<idi> &rank);
    weighti query_distance(
            idi a,
            idi b);
}; // class ParaVertexCentricPLL

// Out-of-class definition of the in-class-initialized static constant
// (required pre-C++17 when the constant is odr-used).
template<inti BATCH_SIZE>
const inti ParaVertexCentricPLL<BATCH_SIZE>::BITPARALLEL_SIZE;

template<inti BATCH_SIZE>
ParaVertexCentricPLL<BATCH_SIZE>::ParaVertexCentricPLL(const Graph &G)
{
    construct(G);
}

// Bit-parallel labeling (Akiba-style): runs BITPARALLEL_SIZE BFSs, each
// rooted at the next unused high-rank vertex r together with up to 64 of
// r's unused neighbors. For every vertex v it records the distance from r
// (bp_dist) and two 64-bit sets over those neighbors: bp_sets[.][0] holds
// neighbors on a shortest path one step closer (S^{-1}), bp_sets[.][1]
// neighbors at equal distance (S^{0}). All roots consumed here are marked
// in used_bp_roots and skipped by the batched labeling later.
template<inti BATCH_SIZE>
inline void ParaVertexCentricPLL<BATCH_SIZE>::bit_parallel_labeling(
        const Graph &G,
        vector<IndexType> &L,
        vector<uint8_t> &used_bp_roots) // CAS needs array
{
    idi num_v = G.get_num_v();
    idi num_e = G.get_num_e();

    if (num_v <= BITPARALLEL_SIZE) {
//  if (true) {
        // Sequential version
        std::vector<weighti> tmp_d(num_v); // distances from the root to every v
        std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
        std::vector<idi> que(num_v); // active queue
        std::vector<std::pair<idi, idi> > sibling_es(
                num_e); // siblings, their distances to the root are equal (have difference of 0)
        std::vector<std::pair<idi, idi> > child_es(
                num_e); // child and father, their distances to the root have difference of 1.

        idi r = 0; // root r
        for (inti i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) {
            // Advance to the next root not yet consumed by an earlier BFS.
            while (r < num_v && used_bp_roots[r]) {
                ++r;
            }
            if (r == num_v) {
                // No roots left: mark this BFS slot unreachable for everyone.
                for (idi v = 0; v < num_v; ++v) {
                    L[v].bp_dist[i_bpspt] = SMALLI_MAX;
                }
                continue;
            }
            used_bp_roots[r] = 1;

            fill(tmp_d.begin(), tmp_d.end(), SMALLI_MAX);
            fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0));

            idi que_t0 = 0, que_t1 = 0, que_h = 0;
            que[que_h++] = r;
            tmp_d[r] = 0;
            que_t1 = que_h;

            int ns = 0; // number of selected neighbor, default 64
            // the edge of one vertex in G is ordered decreasingly to rank, lower rank first, so here need to traverse edges backward
            // There was a bug cost countless time: the unsigned iterator i might decrease to zero and then flip to the INF.
//          idi i_bound = G.vertices[r] - 1;
//          idi i_start = i_bound + G.out_degrees[r];
//          for (idi i = i_start; i > i_bound; --i) {}
            idi d_i_bound = G.out_degrees[r];
            idi i_start = G.vertices[r] + d_i_bound - 1;
            for (idi d_i = 0; d_i < d_i_bound; ++d_i) {
                idi i = i_start - d_i;
                idi v = G.out_edges[i];
                if (!used_bp_roots[v]) {
                    used_bp_roots[v] = 1;
                    // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set)
                    que[que_h++] = v;
                    tmp_d[v] = 1;
                    tmp_s[v].first = 1ULL << ns;
                    if (++ns == 64) break;
                }
            }

            // Level-synchronous BFS; [que_t0, que_t1) is the current frontier.
            for (weighti d = 0; que_t0 < que_h; ++d) {
                idi num_sibling_es = 0, num_child_es = 0;

                for (idi que_i = que_t0; que_i < que_t1; ++que_i) {
                    idi v = que[que_i];
                    idi i_start = G.vertices[v];
                    idi i_bound = i_start + G.out_degrees[v];
                    for (idi i = i_start; i < i_bound; ++i) {
                        idi tv = G.out_edges[i];
                        weighti td = d + 1;

                        if (d > tmp_d[tv]) {
                            ;
                        } else if (d == tmp_d[tv]) {
                            if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph.
                                sibling_es[num_sibling_es].first = v;
                                sibling_es[num_sibling_es].second = tv;
                                ++num_sibling_es;
//                              tmp_s[v].second |= tmp_s[tv].first;
//                              tmp_s[tv].second |= tmp_s[v].first;
                            }
                        } else { // d < tmp_d[tv]
                            if (tmp_d[tv] == SMALLI_MAX) {
                                que[que_h++] = tv;
                                tmp_d[tv] = td;
                            }
                            child_es[num_child_es].first = v;
                            child_es[num_child_es].second = tv;
                            ++num_child_es;
//                          tmp_s[tv].first |= tmp_s[v].first;
//                          tmp_s[tv].second |= tmp_s[v].second;
                        }
                    }
                }

                // Bit-set propagation is deferred until after the frontier
                // scan so each edge sees the frontier's pre-update sets.
                for (idi i = 0; i < num_sibling_es; ++i) {
                    idi v = sibling_es[i].first, w = sibling_es[i].second;
                    tmp_s[v].second |= tmp_s[w].first;
                    tmp_s[w].second |= tmp_s[v].first;
                }
                for (idi i = 0; i < num_child_es; ++i) {
                    idi v = child_es[i].first, c = child_es[i].second;
                    tmp_s[c].first |= tmp_s[v].first;
                    tmp_s[c].second |= tmp_s[v].second;
                }

                que_t0 = que_t1;
                que_t1 = que_h;
            }

            for (idi v = 0; v < num_v; ++v) {
                L[v].bp_dist[i_bpspt] = tmp_d[v];
                L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1}
                L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1}
            }
        }
    } else {
        // Parallel version: Naive parallel enqueue
        // NOTE(review): the BFS itself is identical to the sequential branch
        // above; only the final copy into L is OpenMP-parallel here.
        std::vector<weighti> tmp_d(num_v); // distances from the root to every v
        std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
        std::vector<idi> que(num_v); // active queue
        std::vector<std::pair<idi, idi> > sibling_es(
                num_e); // siblings, their distances to the root are equal (have difference of 0)
        std::vector<std::pair<idi, idi> > child_es(
                num_e); // child and father, their distances to the root have difference of 1.

        idi r = 0; // root r
        for (inti i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) {
            while (r < num_v && used_bp_roots[r]) {
                ++r;
            }
            if (r == num_v) {
                for (idi v = 0; v < num_v; ++v) {
                    L[v].bp_dist[i_bpspt] = SMALLI_MAX;
                }
                continue;
            }
            used_bp_roots[r] = 1;

            fill(tmp_d.begin(), tmp_d.end(), SMALLI_MAX);
            fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0));

            idi que_t0 = 0, que_t1 = 0, que_h = 0;
            que[que_h++] = r;
            tmp_d[r] = 0;
            que_t1 = que_h;

            int ns = 0; // number of selected neighbor, default 64
            // the edge of one vertex in G is ordered decreasingly to rank, lower rank first, so here need to traverse edges backward
            // There was a bug cost countless time: the unsigned iterator i might decrease to zero and then flip to the INF.
//          idi i_bound = G.vertices[r] - 1;
//          idi i_start = i_bound + G.out_degrees[r];
//          for (idi i = i_start; i > i_bound; --i) {}
            idi d_i_bound = G.out_degrees[r];
            idi i_start = G.vertices[r] + d_i_bound - 1;
            for (idi d_i = 0; d_i < d_i_bound; ++d_i) {
                idi i = i_start - d_i;
                idi v = G.out_edges[i];
                if (!used_bp_roots[v]) {
                    used_bp_roots[v] = 1;
                    // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set)
                    que[que_h++] = v;
                    tmp_d[v] = 1;
                    tmp_s[v].first = 1ULL << ns;
                    if (++ns == 64) break;
                }
            }

            for (weighti d = 0; que_t0 < que_h; ++d) {
                idi num_sibling_es = 0, num_child_es = 0;

                for (idi que_i = que_t0; que_i < que_t1; ++que_i) {
                    idi v = que[que_i];
                    idi i_start = G.vertices[v];
                    idi i_bound = i_start + G.out_degrees[v];
                    for (idi i = i_start; i < i_bound; ++i) {
                        idi tv = G.out_edges[i];
                        weighti td = d + 1;

                        if (d > tmp_d[tv]) {
                            ;
                        } else if (d == tmp_d[tv]) {
                            if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph.
                                sibling_es[num_sibling_es].first = v;
                                sibling_es[num_sibling_es].second = tv;
                                ++num_sibling_es;
//                              tmp_s[v].second |= tmp_s[tv].first;
//                              tmp_s[tv].second |= tmp_s[v].first;
                            }
                        } else { // d < tmp_d[tv]
                            if (tmp_d[tv] == SMALLI_MAX) {
                                que[que_h++] = tv;
                                tmp_d[tv] = td;
                            }
                            child_es[num_child_es].first = v;
                            child_es[num_child_es].second = tv;
                            ++num_child_es;
//                          tmp_s[tv].first |= tmp_s[v].first;
//                          tmp_s[tv].second |= tmp_s[v].second;
                        }
                    }
                }

                for (idi i = 0; i < num_sibling_es; ++i) {
                    idi v = sibling_es[i].first, w = sibling_es[i].second;
                    tmp_s[v].second |= tmp_s[w].first;
                    tmp_s[w].second |= tmp_s[v].first;
                }
                for (idi i = 0; i < num_child_es; ++i) {
                    idi v = child_es[i].first, c = child_es[i].second;
                    tmp_s[c].first |= tmp_s[v].first;
                    tmp_s[c].second |= tmp_s[v].second;
                }

                que_t0 = que_t1;
                que_t1 = que_h;
            }

#pragma omp parallel for
            for (idi v = 0; v < num_v; ++v) {
                L[v].bp_dist[i_bpspt] = tmp_d[v];
//              L[v].bp_sets_0[i_bpspt] = tmp_s[v].first; // S_r^{-1}
//              L[v].bp_sets_1[i_bpspt] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1}
                L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1}
                L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1}
            }
        }
    }
}

// NOTE(review): a large commented-out, fully-parallel (CAS + thread-local
// offset queues + __sync_or_and_fetch) variant of bit_parallel_labeling was
// kept here as dead code; trimmed for readability — see version control.
tmp_s[w].first; //// tmp_s[w].second |= tmp_s[v].first; // } // // // From tmp_child_es to child_es // total_sizes_tmp_queue = prefix_sum_for_offsets(sizes_tmp_child_es); // collect_into_queue( // tmp_child_es, // offsets_tmp_queue, // sizes_tmp_child_es, // total_sizes_tmp_queue, // child_es, // num_child_es); // //#pragma omp parallel for // for (idi i = 0; i < num_child_es; ++i) { // idi v = child_es[i].first, c = child_es[i].second; // __sync_or_and_fetch(&tmp_s[c].first, tmp_s[v].first); // __sync_or_and_fetch(&tmp_s[c].second, tmp_s[v].second); //// tmp_s[c].first |= tmp_s[v].first; //// tmp_s[c].second |= tmp_s[v].second; // } // // // From tmp_que to que // total_sizes_tmp_queue = prefix_sum_for_offsets(sizes_tmp_que); // collect_into_queue( // tmp_que, // offsets_tmp_queue, // sizes_tmp_que, // total_sizes_tmp_queue, // que, // que_h); // // que_t0 = que_t1; // que_t1 = que_h; // } // //#pragma omp parallel for // for (idi v = 0; v < num_v; ++v) { // L[v].bp_dist[i_bpspt] = tmp_d[v]; // L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1} // L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1} // } // } // // free(tmp_d); //} // Function for initializing at the begin of a batch // For a batch, initialize the temporary labels and real labels of roots; // traverse roots' labels to initialize distance buffer; // unset flag arrays is_active and got_labels template<inti BATCH_SIZE> inline void ParaVertexCentricPLL<BATCH_SIZE>::initialize( vector<ShortIndex> &short_index, vector<vector<smalli> > &dist_matrix, vector<idi> &active_queue, idi &end_active_queue, vector<idi> &once_candidated_queue, idi &end_once_candidated_queue, // vector<bool> &once_candidated, vector<uint8_t> &once_candidated, idi b_id, idi roots_start, inti roots_size, vector<IndexType> &L, const vector<uint8_t> &used_bp_roots) { idi roots_bound = roots_start + roots_size; // init_start_reset_time -= 
WallTimer::get_time_mark(); // TODO: parallel enqueue { // active_queue for (idi r_real_id = roots_start; r_real_id < roots_bound; ++r_real_id) { if (!used_bp_roots[r_real_id]) { active_queue[end_active_queue++] = r_real_id; } } } // init_start_reset_time += WallTimer::get_time_mark(); // init_index_time -= WallTimer::get_time_mark(); // Short_index { // init_indicators_time -= WallTimer::get_time_mark(); if (end_once_candidated_queue >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (idi v_i = 0; v_i < end_once_candidated_queue; ++v_i) { idi v = once_candidated_queue[v_i]; short_index[v].indicator.reset(); once_candidated[v] = 0; } } else { for (idi v_i = 0; v_i < end_once_candidated_queue; ++v_i) { idi v = once_candidated_queue[v_i]; short_index[v].indicator.reset(); once_candidated[v] = 0; } } //#pragma omp parallel for // for (idi v_i = 0; v_i < end_once_candidated_queue; ++v_i) { // idi v = once_candidated_queue[v_i]; // short_index[v].indicator.reset(); // once_candidated[v] = 0; // } end_once_candidated_queue = 0; if (roots_size >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (idi v = roots_start; v < roots_bound; ++v) { if (!used_bp_roots[v]) { short_index[v].indicator.set(v - roots_start); short_index[v].indicator.set(BATCH_SIZE); // v got labels } } } else { for (idi v = roots_start; v < roots_bound; ++v) { if (!used_bp_roots[v]) { short_index[v].indicator.set(v - roots_start); short_index[v].indicator.set(BATCH_SIZE); // v got labels } } } // for (idi v = roots_start; v < roots_bound; ++v) { // if (!used_bp_roots[v]) { // short_index[v].indicator.set(v - roots_start); // short_index[v].indicator.set(BATCH_SIZE); // v got labels // } // } // init_indicators_time += WallTimer::get_time_mark(); } // // Real Index { if (roots_size >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (idi r_id = 0; r_id < roots_size; ++r_id) { if (used_bp_roots[r_id + roots_start]) { continue; } IndexType &Lr = L[r_id + roots_start]; 
Lr.batches.push_back(IndexType::Batch( b_id, // Batch ID Lr.distances.size(), // start_index 1)); // size Lr.distances.push_back(IndexType::DistanceIndexType( Lr.vertices.size(), // start_index 1, // size 0)); // dist Lr.vertices.push_back(r_id); } } else { for (idi r_id = 0; r_id < roots_size; ++r_id) { if (used_bp_roots[r_id + roots_start]) { continue; } IndexType &Lr = L[r_id + roots_start]; Lr.batches.push_back(IndexType::Batch( b_id, // Batch ID Lr.distances.size(), // start_index 1)); // size Lr.distances.push_back(IndexType::DistanceIndexType( Lr.vertices.size(), // start_index 1, // size 0)); // dist Lr.vertices.push_back(r_id); } } // for (idi r_id = 0; r_id < roots_size; ++r_id) { // if (used_bp_roots[r_id + roots_start]) { // continue; // } // IndexType &Lr = L[r_id + roots_start]; // Lr.batches.push_back(IndexType::Batch( // b_id, // Batch ID // Lr.distances.size(), // start_index // 1)); // size // Lr.distances.push_back(IndexType::DistanceIndexType( // Lr.vertices.size(), // start_index // 1, // size // 0)); // dist // Lr.vertices.push_back(r_id); // } } // init_index_time += WallTimer::get_time_mark(); // init_dist_matrix_time -= WallTimer::get_time_mark(); // Dist_matrix { if (roots_size >= THRESHOLD_PARALLEL) { // schedule dynamic is slower #pragma omp parallel for for (idi r_id = 0; r_id < roots_size; ++r_id) { if (used_bp_roots[r_id + roots_start]) { continue; } IndexType &Lr = L[r_id + roots_start]; inti b_i_bound = Lr.batches.size(); _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); for (inti b_i = 0; b_i < b_i_bound; ++b_i) { idi id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; idi dist_start_index = Lr.batches[b_i].start_index; idi dist_bound_index = dist_start_index + Lr.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { idi v_start_index = Lr.distances[dist_i].start_index; idi v_bound_index = 
v_start_index + Lr.distances[dist_i].size; smalli dist = Lr.distances[dist_i].dist; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = dist; } } } } } else { inti b_i_bound; idi id_offset; idi dist_start_index; idi dist_bound_index; idi v_start_index; idi v_bound_index; smalli dist; for (idi r_id = 0; r_id < roots_size; ++r_id) { if (used_bp_roots[r_id + roots_start]) { continue; } IndexType &Lr = L[r_id + roots_start]; b_i_bound = Lr.batches.size(); _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); for (inti b_i = 0; b_i < b_i_bound; ++b_i) { id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; dist_start_index = Lr.batches[b_i].start_index; dist_bound_index = dist_start_index + Lr.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { v_start_index = Lr.distances[dist_i].start_index; v_bound_index = v_start_index + Lr.distances[dist_i].size; dist = Lr.distances[dist_i].dist; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = dist; } } } } } // inti b_i_bound; // idi id_offset; // idi dist_start_index; // idi dist_bound_index; // idi v_start_index; // idi v_bound_index; // smalli dist; // for (idi r_id = 0; r_id < roots_size; ++r_id) { // if (used_bp_roots[r_id + roots_start]) { // continue; // } // IndexType &Lr = L[r_id + roots_start]; // b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // for (inti b_i = 0; b_i < b_i_bound; ++b_i) { // id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // dist_start_index = Lr.batches[b_i].start_index; // dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse dist_matrix // for (idi dist_i = dist_start_index; dist_i < 
dist_bound_index; ++dist_i) { // v_start_index = Lr.distances[dist_i].start_index; // v_bound_index = v_start_index + Lr.distances[dist_i].size; // dist = Lr.distances[dist_i].dist; // for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { // dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = dist; // } // } // } // } } // init_dist_matrix_time += WallTimer::get_time_mark(); } // Function that pushes v_head's labels to v_head's every neighbor template<inti BATCH_SIZE> inline void ParaVertexCentricPLL<BATCH_SIZE>::push_labels( idi v_head, idi roots_start, const Graph &G, const vector<IndexType> &L, vector<ShortIndex> &short_index, // vector<idi> &candidate_queue, // idi &end_candidate_queue, vector<idi> &tmp_candidate_queue, idi &size_tmp_candidate_queue, const idi offset_tmp_queue, // idi &offset_tmp_queue, // vector<bool> &got_candidates, vector<uint8_t> &got_candidates, // vector<idi> &once_candidated_queue, // idi &end_once_candidated_queue, vector<idi> &tmp_once_candidated_queue, idi &size_tmp_once_candidated_queue, // vector<bool> &once_candidated, vector<uint8_t> &once_candidated, const vector<uint8_t> &used_bp_roots, smalli iter) { const IndexType &Lv = L[v_head]; // These 2 index are used for traversing v_head's last inserted labels idi l_i_start = Lv.distances.rbegin()->start_index; idi l_i_bound = l_i_start + Lv.distances.rbegin()->size; // Traverse v_head's every neighbor v_tail idi e_i_start = G.vertices[v_head]; idi e_i_bound = e_i_start + G.out_degrees[v_head]; for (idi e_i = e_i_start; e_i < e_i_bound; ++e_i) { idi v_tail = G.out_edges[e_i]; if (used_bp_roots[v_head]) { continue; } if (v_tail < roots_start) { // v_tail has higher rank than any roots, then no roots can push new labels to it. 
return; } // if (v_tail <= Lv.vertices[l_i_start] + roots_start) { // v_tail has higher rank than any v_head's labels // return; // } // This condition cannot be used anymore since v_head's last inserted labels are not ordered from higher rank to lower rank now, because v_head's candidate set is a queue now rather than a bitmap. For a queue, its order of candidates are not ordered by ranks. const IndexType &L_tail = L[v_tail]; _mm_prefetch(&L_tail.bp_dist[0], _MM_HINT_T0); _mm_prefetch(&L_tail.bp_sets[0][0], _MM_HINT_T0); // Traverse v_head's last inserted labels for (idi l_i = l_i_start; l_i < l_i_bound; ++l_i) { inti label_root_id = Lv.vertices[l_i]; idi label_real_id = label_root_id + roots_start; if (v_tail <= label_real_id) { // v_tail has higher rank than all remaining labels // For candidates_que, this is not true any more! // break; continue; } ShortIndex &SI_v_tail = short_index[v_tail]; if (SI_v_tail.indicator[label_root_id]) { // The label is already selected before continue; } // Record label_root_id as once selected by v_tail SI_v_tail.indicator.set(label_root_id); // {// Deal with data race // volatile char lock = 0; // if (CAS((void *) (&lock), static_cast<char>(0), static_cast<char>(1))) { // SI_v_tail.indicator.set(label_root_id); // } else { // continue; // } // } {//test // Check v_tail's indicator if (!SI_v_tail.indicator[label_root_id]) { printf("L%u: B%u short_index[%u].indicator[%u]: %u which should be 1.\n", __LINE__, roots_start / BATCH_SIZE, v_tail, label_real_id, (idi) SI_v_tail.indicator[label_root_id]); } // else { // printf("L%u: T%u: B%u: l_i: %u " // "short_index[%u].indicator[%u]: %u\n", // __LINE__, omp_get_thread_num(), roots_start / BATCH_SIZE, l_i, // v_tail, label_real_id, (idi) SI_v_tail.indicator[label_root_id]); // } } // Add into once_candidated_queue if (!once_candidated[v_tail]) { // If v_tail is not in the once_candidated_queue yet, add it in if (CAS(&once_candidated[v_tail], (uint8_t) 0, (uint8_t) 1)) { 
tmp_once_candidated_queue[offset_tmp_queue + size_tmp_once_candidated_queue++] = v_tail; } } // CHANGED! // Bit Parallel Checking: if label_real_id to v_tail has shorter distance already // ++total_check_count; const IndexType &L_label = L[label_real_id]; bool no_need_add = false; _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0); _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0); for (inti i = 0; i < BITPARALLEL_SIZE; ++i) { inti td = L_label.bp_dist[i] + L_tail.bp_dist[i]; if (td - 2 <= iter) { td += (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 : ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) | (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) ? -1 : 0; if (td <= iter) { no_need_add = true; // ++bp_hit_count; break; } } } if (no_need_add) { continue; } // Record vertex label_root_id as v_tail's candidates label // SI_v_tail.candidates.set(label_root_id); // if (!SI_v_tail.is_candidate[label_root_id]) { // SI_v_tail.is_candidate[label_root_id] = true; // SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id; // } if (!SI_v_tail.is_candidate[label_root_id]) { if (CAS(&SI_v_tail.is_candidate[label_root_id], (uint8_t) 0, (uint8_t) 1)) { TS_enqueue(SI_v_tail.candidates_que, SI_v_tail.end_candidates_que, label_root_id); // volatile inti old_v = SI_v_tail.end_candidates_que; // volatile inti new_v = old_v + 1; // while (!CAS(&SI_v_tail.end_candidates_que, old_v, new_v)) { // old_v = SI_v_tail.end_candidates_que; // new_v = old_v + 1; // } // SI_v_tail.candidates_que[old_v] = label_root_id; { SI_v_tail.indicator.set(label_root_id); } {//test // Check v_tail's indicator if (!SI_v_tail.indicator[label_root_id]) { printf("L%u: T%u: B%u: l_i: %u iter: %u " "short_index[%u].indicator[%u]: %u which should be 1.\n", __LINE__, omp_get_thread_num(), roots_start / BATCH_SIZE, l_i, iter, v_tail, label_real_id, (idi) SI_v_tail.indicator[label_root_id]); } } } } // Add into candidate_queue if (!got_candidates[v_tail]) { // If v_tail is not in candidate_queue, 
add it in (prevent duplicate) if (CAS(&got_candidates[v_tail], (uint8_t) 0, (uint8_t) 1)) { tmp_candidate_queue[offset_tmp_queue + size_tmp_candidate_queue++] = v_tail; } } // // Add into once_candidated_queue //#pragma omp critical // if (!once_candidated[v_tail]) { // // If v_tail is not in the once_candidated_queue yet, add it in // once_candidated[v_tail] = true; // once_candidated_queue[end_once_candidated_queue++] = v_tail; // } // // Add into candidate_queue // if (!got_candidates[v_tail]) { // // If v_tail is not in candidate_queue, add it in (prevent duplicate) // got_candidates[v_tail] = true; // candidate_queue[end_candidate_queue++] = v_tail; // } } } // printf("v_head: %u, size_tmp_candidate_queue: %u\n", v_head, size_tmp_candidate_queue);//test } // Function for distance query; // traverse vertex v_id's labels; // return the distance between v_id and cand_root_id based on existing labels. // return false if shorter distance exists already, return true if the cand_root_id can be added into v_id's label. 
// Distance query: scan v_id's existing labels and, via the per-root distance
// buffer dist_matrix, decide whether the candidate (cand_root_id, iter) is
// already dominated by some 2-hop path through an existing label hub.
// Returns false if an existing hub certifies dist(v_id, cand_real_id) <= iter;
// returns true if cand_root_id should be inserted into v_id's labels.
template<inti BATCH_SIZE>
inline bool ParaVertexCentricPLL<BATCH_SIZE>::distance_query(
		idi cand_root_id,	// candidate root id, relative to roots_start
		idi v_id,			// vertex whose labels are being checked
		idi roots_start,	// first global vertex id of the current root batch
		const vector<IndexType> &L,
		const vector<vector<smalli> > &dist_matrix,	// dist_matrix[root][hub]: distance from root to hub
		smalli iter)		// current iteration == candidate label distance
{
//	++total_check_count;
//	distance_query_time -= WallTimer::get_time_mark();
	idi cand_real_id = cand_root_id + roots_start; // candidate's global vertex id
	const IndexType &Lv = L[v_id];
	// Traverse v_id's all existing labels
	inti b_i_bound = Lv.batches.size();
	_mm_prefetch(&Lv.batches[0], _MM_HINT_T0);
	_mm_prefetch(&Lv.distances[0], _MM_HINT_T0);
	_mm_prefetch(&Lv.vertices[0], _MM_HINT_T0);
	_mm_prefetch(&dist_matrix[cand_root_id][0], _MM_HINT_T0);
	for (inti b_i = 0; b_i < b_i_bound; ++b_i) {
		idi id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE; // global-id base of this batch's label hubs
		idi dist_start_index = Lv.batches[b_i].start_index;
		idi dist_bound_index = dist_start_index + Lv.batches[b_i].size;
		// Traverse dist_matrix
		for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
			inti dist = Lv.distances[dist_i].dist;
			if (dist >= iter) { // In a batch, the labels' distances are increasingly ordered.
				// If the half path distance is already greater than their targeted distance, jump to next batch
				break;
			}
			idi v_start_index = Lv.distances[dist_i].start_index;
			idi v_bound_index = v_start_index + Lv.distances[dist_i].size;
//			_mm_prefetch(&dist_matrix[cand_root_id][0], _MM_HINT_T0);
			for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) {
				idi v = Lv.vertices[v_i] + id_offset; // v is a label hub of v_id
				{//test
					// Debug check: the candidate itself should never already be one of v_id's hubs.
					if (v == cand_real_id) {
						printf("T%u: "
								"In distance_query: v_id %u had got (%u, %u) in B%u, but is being pushed (%u, %u) in B%u again.\n",
								omp_get_thread_num(),
								v_id,
								v, dist, Lv.batches[b_i].batch_id,
								cand_real_id, iter, roots_start / BATCH_SIZE);
//						printf("tmp_short_index[%u].indicator[%u]: %u "
//								"now_short_index[%u].indicator[%u]: %u\n",
//								v_id, cand_real_id,
//								(idi) tmp_short_index[v_id].indicator[cand_root_id],
//								v_id, cand_real_id,
//								(idi) now_short_index[v_id].indicator[cand_root_id]);
					}
				}
				if (v >= cand_real_id) {
					// Vertex cand_real_id cannot have labels whose ranks are lower than it,
					// in which case dist_matrix[cand_root_id][v] does not exist.
					continue;
				}
				inti d_tmp = dist + dist_matrix[cand_root_id][v];
				{//test
					// Debug check: should be unreachable (guarded by the continue above).
					if (v == cand_real_id) {
						printf("d_tmp: %u dist: %u dist_matrix[%u][%u]: %u\n",
								d_tmp, dist, cand_real_id, v, dist_matrix[cand_root_id][v]);
					}
				}
				if (d_tmp <= iter) {
					// An existing 2-hop path through hub v is already no longer
					// than the candidate distance, so the candidate is pruned.
//					distance_query_time += WallTimer::get_time_mark();
//					++normal_hit_count;
					return false;
				}
			}
		}
	}
//	distance_query_time += WallTimer::get_time_mark();
	return true;
}

// Function inserts candidate cand_root_id into vertex v_id's labels;
// updates the distance buffer dist_matrix (only when v_id is itself a root of this batch);
// but it only updates v_id's labels' vertices array; the index arrays are
// updated later by update_label_indices.
template<inti BATCH_SIZE>
inline void ParaVertexCentricPLL<BATCH_SIZE>::insert_label_only(
		idi cand_root_id,
		idi v_id,
		idi roots_start,
		inti roots_size,
		vector<IndexType> &L,
		vector<vector<smalli> > &dist_matrix,
		smalli iter)
{
	L[v_id].vertices.push_back(cand_root_id);
	// Update the distance buffer if necessary
	idi v_root_id = v_id - roots_start;
	if (v_id >= roots_start && v_root_id < roots_size) {
		// v_id is a root of this batch: record its new distance to the candidate root.
		dist_matrix[v_root_id][cand_root_id + roots_start] = iter;
	}
}

// Function updates those index arrays in v_id's label only if v_id has been inserted new labels
template<inti BATCH_SIZE>
inline void ParaVertexCentricPLL<BATCH_SIZE>::update_label_indices(
		idi v_id,
		idi inserted_count,	// how many labels were appended to Lv.vertices this iteration
		vector<IndexType> &L,
		vector<ShortIndex> &short_index,
		idi b_id,
		smalli iter)
{
	IndexType &Lv = L[v_id];
	// indicator[BATCH_SIZE] set means v got some labels already in this batch
	if (short_index[v_id].indicator[BATCH_SIZE]) {
		// Increase the batches' last element's size because a new distance element need to be added
		++(Lv.batches.rbegin()->size);
	} else {
		short_index[v_id].indicator.set(BATCH_SIZE);
		// Insert a new Batch with batch_id, start_index, and size because a new distance element need to be added
		Lv.batches.push_back(IndexType::Batch(
				b_id,
				Lv.distances.size(),
				1));
	}
	// Insert a new distance element with start_index, size, and dist
	Lv.distances.push_back(IndexType::DistanceIndexType(
			Lv.vertices.size() -
inserted_count, inserted_count, iter)); } // Function to reset dist_matrix the distance buffer to INF // Traverse every root's labels to reset its distance buffer elements to INF. // In this way to reduce the cost of initialization of the next batch. template<inti BATCH_SIZE> inline void ParaVertexCentricPLL<BATCH_SIZE>::reset_at_end( idi roots_start, inti roots_size, vector<IndexType> &L, vector<vector<smalli> > &dist_matrix) { if (roots_size >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (idi r_id = 0; r_id < roots_size; ++r_id) { IndexType &Lr = L[r_id + roots_start]; inti b_i_bound = Lr.batches.size(); _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); for (inti b_i = 0; b_i < b_i_bound; ++b_i) { idi id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; idi dist_start_index = Lr.batches[b_i].start_index; idi dist_bound_index = dist_start_index + Lr.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { idi v_start_index = Lr.distances[dist_i].start_index; idi v_bound_index = v_start_index + Lr.distances[dist_i].size; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = SMALLI_MAX; } } } } } else { inti b_i_bound; idi id_offset; idi dist_start_index; idi dist_bound_index; idi v_start_index; idi v_bound_index; for (idi r_id = 0; r_id < roots_size; ++r_id) { IndexType &Lr = L[r_id + roots_start]; b_i_bound = Lr.batches.size(); _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); for (inti b_i = 0; b_i < b_i_bound; ++b_i) { id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; dist_start_index = Lr.batches[b_i].start_index; dist_bound_index = dist_start_index + Lr.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { 
v_start_index = Lr.distances[dist_i].start_index; v_bound_index = v_start_index + Lr.distances[dist_i].size; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = SMALLI_MAX; } } } } } // inti b_i_bound; // idi id_offset; // idi dist_start_index; // idi dist_bound_index; // idi v_start_index; // idi v_bound_index; // for (idi r_id = 0; r_id < roots_size; ++r_id) { // IndexType &Lr = L[r_id + roots_start]; // b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // for (inti b_i = 0; b_i < b_i_bound; ++b_i) { // id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // dist_start_index = Lr.batches[b_i].start_index; // dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse dist_matrix // for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // v_start_index = Lr.distances[dist_i].start_index; // v_bound_index = v_start_index + Lr.distances[dist_i].size; // for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { // dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = SMALLI_MAX; // } // } // } // } } template<inti BATCH_SIZE> inline void ParaVertexCentricPLL<BATCH_SIZE>::batch_process( const Graph &G, idi b_id, idi roots_start, // start id of roots inti roots_size, // how many roots in the batch vector<IndexType> &L, const vector<uint8_t> &used_bp_roots, vector<idi> &active_queue, idi &end_active_queue, vector<idi> &candidate_queue, idi &end_candidate_queue, vector<ShortIndex> &short_index, vector<vector<smalli> > &dist_matrix, vector<uint8_t> &got_candidates, vector<uint8_t> &is_active, vector<idi> &once_candidated_queue, idi &end_once_candidated_queue, vector<uint8_t> &once_candidated) //inline void ParaVertexCentricPLL::batch_process( // const Graph &G, // idi b_id, // idi roots_start, // start id of roots // inti roots_size, // how many roots in the batch 
// vector<IndexType> &L, // const vector<bool> &used_bp_roots) { // initializing_time -= WallTimer::get_time_mark(); // static const idi num_v = G.get_num_v(); // static vector<idi> active_queue(num_v); // static idi end_active_queue = 0; // static vector<idi> candidate_queue(num_v); // static idi end_candidate_queue = 0; // static vector<ShortIndex> short_index(num_v); // static vector< vector<smalli> > dist_matrix(roots_size, vector<smalli>(num_v, SMALLI_MAX)); // static uint8_t *got_candidates = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. // static uint8_t *is_active = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // static vector<idi> once_candidated_queue(num_v); // The vertex who got some candidates in this batch is in the once_candidated_queue. // static idi end_once_candidated_queue = 0; // static uint8_t *once_candidated = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. // At the beginning of a batch, initialize the labels L and distance buffer dist_matrix; // printf("initializing...\n");//test initialize( short_index, dist_matrix, active_queue, end_active_queue, once_candidated_queue, end_once_candidated_queue, once_candidated, b_id, roots_start, roots_size, L, used_bp_roots); smalli iter = 0; // The iterator, also the distance for current iteration // initializing_time += WallTimer::get_time_mark(); {//test // now_short_index.assign(short_index.begin(), short_index.end()); } while (0 != end_active_queue) { // candidating_time -= WallTimer::get_time_mark(); ++iter; {//test // tmp_short_index.swap(now_short_index); } // Pushing // printf("pushing...\n");//test { // Prepare for parallel processing the active_queue and adding to candidate_queue. 
// Every vertex's offset location in tmp_candidate_queue // It's used for every thread to write into tmp_candidate_queue and tmp_once_candidated_queue vector<idi> offsets_tmp_queue(end_active_queue); #pragma omp parallel for for (idi i_queue = 0; i_queue < end_active_queue; ++i_queue) { // Traverse all active vertices, get their out degrees. offsets_tmp_queue[i_queue] = G.out_degrees[active_queue[i_queue]]; } idi num_neighbors = prefix_sum_for_offsets(offsets_tmp_queue); // every thread writes to tmp_candidate_queue at its offset location vector<idi> tmp_candidate_queue(num_neighbors); // A vector to store the true number of pushed neighbors of every active vertex. vector<idi> sizes_tmp_candidate_queue(end_active_queue, 0); // similarly, every thread writes to tmp_once_candidated_queue at its offset location vector<idi> tmp_once_candidated_queue(num_neighbors); // And store the true number of new added once-candidated vertices. vector<idi> sizes_tmp_once_candidated_queue(end_active_queue, 0); // Traverse active vertices to push their labels as candidates // schedule dynamic is slower #pragma omp parallel for //TODO: turn on OpenMP for (idi i_queue = 0; i_queue < end_active_queue; ++i_queue) { idi v_head = active_queue[i_queue]; is_active[v_head] = 0; // reset is_active push_labels( v_head, roots_start, G, L, short_index, // candidate_queue, // end_candidate_queue, tmp_candidate_queue, sizes_tmp_candidate_queue[i_queue], offsets_tmp_queue[i_queue], got_candidates, // once_candidated_queue, // end_once_candidated_queue, tmp_once_candidated_queue, sizes_tmp_once_candidated_queue[i_queue], once_candidated, used_bp_roots, iter); } {//test // now_short_index.assign(short_index.begin(), short_index.end()); } // According to sizes_tmp_candidate_queue, get the offset for inserting to the real queue idi total_new = prefix_sum_for_offsets(sizes_tmp_candidate_queue); // Collect all candidate vertices from tmp_candidate_queue into candidate_queue. 
collect_into_queue( tmp_candidate_queue, offsets_tmp_queue, // the locations in tmp_queue for writing from tmp_queue sizes_tmp_candidate_queue, // the locations in queue for writing into queue. total_new, // total number of elements which need to be added from tmp_queue to queue candidate_queue, end_candidate_queue); // Get the offset for inserting to the real queue. total_new = prefix_sum_for_offsets(sizes_tmp_once_candidated_queue); // Collect all once-candidated vertices from tmp_once_candidated_queue into once_candidated_queue collect_into_queue( tmp_once_candidated_queue, offsets_tmp_queue, sizes_tmp_once_candidated_queue, total_new, once_candidated_queue, end_once_candidated_queue); // printf("end_candidate_queue: %u\n", end_candidate_queue); fflush(stdout);//test end_active_queue = 0; // Set the active_queue empty } // candidating_time += WallTimer::get_time_mark(); if (end_candidate_queue == 0) { break; } // adding_time -= WallTimer::get_time_mark(); // Adding // printf("adding...\n");//test { ////////////////////////////////////////////////////////////////////////////////// // OpenMP Version // Prepare for parallel processing the candidate_queue and adding to active_queue. // Every vertex's offset location in tmp_active_queue is i_queue * roots_size // It's used for every thread to write into tmp_candidate_queue and tmp_once_candidated_queue vector<idi> offsets_tmp_queue(end_candidate_queue); #pragma omp parallel for for (idi i_queue = 0; i_queue < end_candidate_queue; ++i_queue) { // Traverse all active vertices, get their out degrees. // A ridiculous bug here. The v_id will, if any, only add itself to the active queue. //offsets_tmp_queue[i_queue] = i_queue * roots_size; offsets_tmp_queue[i_queue] = i_queue; } // every thread writes to tmp_candidate_queue at its offset location vector<idi> tmp_active_queue(end_candidate_queue); // A vector to store the true number of pushed neighbors of every active vertex. 
vector<idi> sizes_tmp_active_queue(end_candidate_queue, 0); // Traverse vertices in the candidate_queue to insert labels // Here schedule dynamic will be slower //#ifdef PROFILE // cache_miss.measure_start(); //#endif #pragma omp parallel for schedule(dynamic) for (idi i_queue = 0; i_queue < end_candidate_queue; ++i_queue) { //#ifdef PROFILE // inti tid = omp_get_thread_num(); // thds_adding_time[tid] -= WallTimer::get_time_mark(); //#endif idi v_id = candidate_queue[i_queue]; inti inserted_count = 0; //recording number of v_id's truly inserted candidates got_candidates[v_id] = 0; // reset got_candidates inti bound_cand_i = short_index[v_id].end_candidates_que; for (inti cand_i = 0; cand_i < bound_cand_i; ++cand_i) { inti cand_root_id = short_index[v_id].candidates_que[cand_i]; {//test // Check v_id's indicator if (!short_index[v_id].indicator[cand_root_id]) { printf("L%u: T%u: B%u: iter: %u " "short_index[%u].indicator[%u]: %u which should be 1.\n", __LINE__, omp_get_thread_num(), b_id, iter, v_id, cand_root_id + roots_start, (idi) short_index[v_id].indicator[cand_root_id]); } } short_index[v_id].is_candidate[cand_root_id] = 0; // Reset is_candidate // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance if (distance_query( cand_root_id, v_id, roots_start, L, dist_matrix, iter)) { if (!is_active[v_id]) { is_active[v_id] = 1; tmp_active_queue[offsets_tmp_queue[i_queue] + sizes_tmp_active_queue[i_queue]++] = v_id; } // if (!be_active) { // be_active = true; // } // if (!is_active[v_id]) { // is_active[v_id] = true; // active_queue[end_active_queue++] = v_id; // } ++inserted_count; // The candidate cand_root_id needs to be added into v_id's label insert_label_only( cand_root_id, v_id, roots_start, roots_size, L, dist_matrix, iter); {//test // // Check v_id's indicator if (!short_index[v_id].indicator[cand_root_id]) { printf("L:%u T%u: B%u iter: %u " "short_index[%u].indicator[%u]: %u which should be 1.\n", __LINE__, 
omp_get_thread_num(), b_id, iter, v_id, cand_root_id + roots_start, (idi) short_index[v_id].indicator[cand_root_id]); } // Traverse all v_id's labels and check if cand_root_id is there const IndexType &Lv = L[v_id]; inti b_i_bound = Lv.batches.size(); _mm_prefetch(&Lv.batches[0], _MM_HINT_T0); _mm_prefetch(&Lv.distances[0], _MM_HINT_T0); _mm_prefetch(&Lv.vertices[0], _MM_HINT_T0); _mm_prefetch(&dist_matrix[cand_root_id][0], _MM_HINT_T0); for (inti b_i = 0; b_i < b_i_bound; ++b_i) { idi id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE; idi dist_start_index = Lv.batches[b_i].start_index; idi dist_bound_index = dist_start_index + Lv.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { inti dist = Lv.distances[dist_i].dist; idi v_start_index = Lv.distances[dist_i].start_index; idi v_bound_index = v_start_index + Lv.distances[dist_i].size; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { idi v = Lv.vertices[v_i] + id_offset; // v is a label hub of v_id if (v == cand_root_id + roots_start) { printf("! T%u: " "v_id %u already got (%u, %u), rather than (%u, %u)\n", omp_get_thread_num(), v_id, v, dist, cand_root_id + roots_start, iter); // exit(-1); } } } } } } } short_index[v_id].end_candidates_que = 0; // if (be_active) { // if (CAS(&is_active[v_id], (uint8_t) 0, (uint8_t) 1)) { // tmp_active_queue[offsets_tmp_queue[i_queue] + sizes_tmp_active_queue[i_queue]++] = v_id; // } // } if (0 != inserted_count) { // Update other arrays in L[v_id] if new labels were inserted in this iteration update_label_indices( v_id, inserted_count, L, short_index, b_id, iter); } } // According to sizes_tmp_active_queue, get the offset for inserting to the real queue idi total_new = prefix_sum_for_offsets(sizes_tmp_active_queue); // Collect all candidate vertices from tmp_candidate_queue into candidate_queue. 
collect_into_queue( tmp_active_queue, offsets_tmp_queue, // the locations in tmp_queue for writing from tmp_queue sizes_tmp_active_queue, // the locations in queue for writing into queue. total_new, // total number of elements which need to be added from tmp_queue to queue active_queue, end_active_queue); end_candidate_queue = 0; // Set the candidate_queue empty ////////////////////////////////////////////////////////////////////////////////// ////// Sequential version // for (idi i_queue = 0; i_queue < end_candidate_queue; ++i_queue) { // idi v_id = candidate_queue[i_queue]; // inti inserted_count = 0; //recording number of v_id's truly inserted candidates // got_candidates[v_id] = false; // reset got_candidates // // Traverse v_id's all candidates // inti bound_cand_i = short_index[v_id].end_candidates_que; // for (inti cand_i = 0; cand_i < bound_cand_i; ++cand_i) { // inti cand_root_id = short_index[v_id].candidates_que[cand_i]; // short_index[v_id].is_candidate[cand_root_id] = false; // // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance // if ( distance_query( // cand_root_id, // v_id, // roots_start, // L, // dist_matrix, // iter) ) { // if (!is_active[v_id]) { // is_active[v_id] = true; // active_queue[end_active_queue++] = v_id; // } // ++inserted_count; // // The candidate cand_root_id needs to be added into v_id's label // insert_label_only( // cand_root_id, // v_id, // roots_start, // roots_size, // L, // dist_matrix, // iter); // } // } // short_index[v_id].end_candidates_que = 0; //// } // if (0 != inserted_count) { // // Update other arrays in L[v_id] if new labels were inserted in this iteration // update_label_indices( // v_id, // inserted_count, // L, // short_index, // b_id, // iter); // } // } // end_candidate_queue = 0; // Set the candidate_queue empty ////////////////////////////////////////////////////////////////////////////////////// } // adding_time += WallTimer::get_time_mark(); } // 
Reset the dist_matrix // initializing_time -= WallTimer::get_time_mark(); // init_dist_matrix_time -= WallTimer::get_time_mark(); reset_at_end( roots_start, roots_size, L, dist_matrix); // init_dist_matrix_time += WallTimer::get_time_mark(); // initializing_time += WallTimer::get_time_mark(); // double total_time = time_can + time_add; // printf("Candidating time: %f (%f%%)\n", time_can, time_can / total_time * 100); // printf("Adding time: %f (%f%%)\n", time_add, time_add / total_time * 100); } template<inti BATCH_SIZE> void ParaVertexCentricPLL<BATCH_SIZE>::construct(const Graph &G) { // initializing_time -= WallTimer::get_time_mark(); idi num_v = G.get_num_v(); num_v_ = num_v; L.resize(num_v); idi remainer = num_v % BATCH_SIZE; idi b_i_bound = num_v / BATCH_SIZE; // uint8_t *used_bp_roots = (uint8_t *) calloc(num_v, sizeof(uint8_t)); vector<uint8_t> used_bp_roots(num_v, 0); vector<idi> active_queue(num_v); idi end_active_queue = 0; vector<idi> candidate_queue(num_v); idi end_candidate_queue = 0; vector<ShortIndex> short_index(num_v); vector<vector<smalli> > dist_matrix(BATCH_SIZE, vector<smalli>(num_v, SMALLI_MAX)); // uint8_t *got_candidates = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. // uint8_t *is_active = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. vector<uint8_t> got_candidates(num_v, 0); vector<uint8_t> is_active(num_v, 0); vector<idi> once_candidated_queue( num_v); // The vertex who got some candidates in this batch is in the once_candidated_queue. idi end_once_candidated_queue = 0; // uint8_t *once_candidated = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. 
vector<uint8_t> once_candidated(num_v, 0); // initializing_time += WallTimer::get_time_mark(); double time_labeling = -WallTimer::get_time_mark(); //double bp_labeling_time = -WallTimer::get_time_mark(); // printf("BP labeling...\n"); //test bit_parallel_labeling( G, L, used_bp_roots); //bp_labeling_time += WallTimer::get_time_mark(); for (idi b_i = 0; b_i < b_i_bound; ++b_i) { // printf("b_i: %u\n", b_i);//test batch_process( G, b_i, b_i * BATCH_SIZE, BATCH_SIZE, L, used_bp_roots, active_queue, end_active_queue, candidate_queue, end_candidate_queue, short_index, dist_matrix, got_candidates, is_active, once_candidated_queue, end_once_candidated_queue, once_candidated); // batch_process( // G, // b_i, // b_i * BATCH_SIZE, // BATCH_SIZE, // L, // used_bp_roots); } if (remainer != 0) { // printf("b_i: %u the last batch\n", b_i_bound);//test batch_process( G, b_i_bound, b_i_bound * BATCH_SIZE, remainer, L, used_bp_roots, active_queue, end_active_queue, candidate_queue, end_candidate_queue, short_index, dist_matrix, got_candidates, is_active, once_candidated_queue, end_once_candidated_queue, once_candidated); // batch_process( // G, // b_i_bound, // b_i_bound * BATCH_SIZE, // remainer, // L, // used_bp_roots); } time_labeling += WallTimer::get_time_mark(); // free(got_candidates); // free(is_active); // free(once_candidated); // free(used_bp_roots); // Test printf("Threads: %u Batch_size: %u\n", NUM_THREADS, BATCH_SIZE); //printf("BP_labeling: %.2f %.2f%%\n", bp_labeling_time, bp_labeling_time / time_labeling * 100); printf("BP_Roots_Size: %u\n", BITPARALLEL_SIZE); // printf("Initializing: %.2f %.2f%%\n", initializing_time, initializing_time / time_labeling * 100); // printf("\tinit_start_reset_time: %f (%f%%)\n", init_start_reset_time, init_start_reset_time / initializing_time * 100); // printf("\tinit_index_time: %f (%f%%)\n", init_index_time, init_index_time / initializing_time * 100); // printf("\t\tinit_indicators_time: %f (%f%%)\n", init_indicators_time, 
init_indicators_time / init_index_time * 100); // printf("\tinit_dist_matrix_time: %f (%f%%)\n", init_dist_matrix_time, init_dist_matrix_time / initializing_time * 100); // printf("Candidating: %.2f %.2f%%\n", candidating_time, candidating_time / time_labeling * 100); // printf("Adding: %.2f %.2f%%\n", adding_time, adding_time / time_labeling * 100); // printf("\tdistance_query_time: %f (%f%%)\n", distance_query_time, distance_query_time / adding_time * 100); // printf("\ttotal_check_count: %llu\n", total_check_count); // printf("\tbp_hit_count (to total_check): %llu (%f%%)\n", // bp_hit_count, // bp_hit_count * 100.0 / total_check_count); // printf("\tnormal_hit_count (to total_check, to normal_check): %llu (%f%%, %f%%)\n", // normal_hit_count, // normal_hit_count * 100.0 / total_check_count, // normal_hit_count * 100.0 / (total_check_count - bp_hit_count)); #ifdef PROFILE uint64_t total_thds_adding_count = 0; double total_thds_adding_time = 0; for (inti tid = 0; tid < NUM_THREADS; ++tid) { total_thds_adding_count += thds_adding_count[tid]; total_thds_adding_time += thds_adding_time[tid]; } printf("Threads_adding_count:"); for (inti tid = 0; tid < NUM_THREADS; ++tid) { printf(" %lu(%.2f%%)", thds_adding_count[tid], thds_adding_count[tid] * 100.0 / total_thds_adding_count); } puts(""); printf("Threads_adding_time:"); for (inti tid = 0; tid < NUM_THREADS; ++tid) { printf(" %f(%.2f%%)", thds_adding_time[tid], thds_adding_time[tid] * 100.0 / total_thds_adding_time); } puts(""); //printf("Threads_adding_average_time:"); //for (inti tid = 0; tid < NUM_THREADS; ++tid) { // printf(" %f", thds_adding_time[tid] / thds_adding_count[tid]); //} puts(""); cache_miss.print(); #endif printf("Total_labeling_time: %.2f seconds\n", time_labeling); // End test } // Function to get the prefix sum of elements in offsets template<inti BATCH_SIZE> inline idi ParaVertexCentricPLL<BATCH_SIZE>::prefix_sum_for_offsets( vector<idi> &offsets) { idi size_offsets = offsets.size(); if (1 == 
size_offsets) { idi tmp = offsets[0]; offsets[0] = 0; return tmp; } else if (size_offsets < 2048) { idi offset_sum = 0; idi size = size_offsets; for (idi i = 0; i < size; ++i) { idi tmp = offsets[i]; offsets[i] = offset_sum; offset_sum += tmp; } return offset_sum; } else { // Parallel Prefix Sum, based on Guy E. Blelloch's Prefix Sums and Their Applications idi last_element = offsets[size_offsets - 1]; // idi size = 1 << ((idi) log2(size_offsets - 1) + 1); idi size = 1 << ((idi) log2(size_offsets)); // vector<idi> nodes(size, 0); idi tmp_element = offsets[size - 1]; //#pragma omp parallel for // for (idi i = 0; i < size_offsets; ++i) { // nodes[i] = offsets[i]; // } // Up-Sweep (Reduce) Phase idi log2size = log2(size); for (idi d = 0; d < log2size; ++d) { idi by = 1 << (d + 1); #pragma omp parallel for for (idi k = 0; k < size; k += by) { offsets[k + (1 << (d + 1)) - 1] += offsets[k + (1 << d) - 1]; } } // Down-Sweep Phase offsets[size - 1] = 0; for (idi d = log2(size) - 1; d != (idi) -1; --d) { idi by = 1 << (d + 1); #pragma omp parallel for for (idi k = 0; k < size; k += by) { idi t = offsets[k + (1 << d) - 1]; offsets[k + (1 << d) - 1] = offsets[k + (1 << (d + 1)) - 1]; offsets[k + (1 << (d + 1)) - 1] += t; } } //#pragma omp parallel for // for (idi i = 0; i < size_offsets; ++i) { // offsets[i] = nodes[i]; // } if (size != size_offsets) { idi tmp_sum = offsets[size - 1] + tmp_element; for (idi i = size; i < size_offsets; ++i) { idi t = offsets[i]; offsets[i] = tmp_sum; tmp_sum += t; } } return offsets[size_offsets - 1] + last_element; } // // Get the offset as the prefix sum of out degrees // idi offset_sum = 0; // idi size = offsets.size(); // for (idi i = 0; i < size; ++i) { // idi tmp = offsets[i]; // offsets[i] = offset_sum; // offset_sum += tmp; // } // return offset_sum; //// Parallel Prefix Sum, based on Guy E. 
Blelloch's Prefix Sums and Their Applications // idi size_offsets = offsets.size(); // idi last_element = offsets[size_offsets - 1]; //// idi size = 1 << ((idi) log2(size_offsets - 1) + 1); // idi size = 1 << ((idi) log2(size_offsets)); //// vector<idi> nodes(size, 0); // idi tmp_element = offsets[size - 1]; ////#pragma omp parallel for //// for (idi i = 0; i < size_offsets; ++i) { //// nodes[i] = offsets[i]; //// } // // // Up-Sweep (Reduce) Phase // idi log2size = log2(size); // for (idi d = 0; d < log2size; ++d) { // idi by = 1 << (d + 1); //#pragma omp parallel for // for (idi k = 0; k < size; k += by) { // offsets[k + (1 << (d + 1)) - 1] += offsets[k + (1 << d) - 1]; // } // } // // // Down-Sweep Phase // offsets[size - 1] = 0; // for (idi d = log2(size) - 1; d != (idi) -1 ; --d) { // idi by = 1 << (d + 1); //#pragma omp parallel for // for (idi k = 0; k < size; k += by) { // idi t = offsets[k + (1 << d) - 1]; // offsets[k + (1 << d) - 1] = offsets[k + (1 << (d + 1)) - 1]; // offsets[k + (1 << (d + 1)) - 1] += t; // } // } // ////#pragma omp parallel for //// for (idi i = 0; i < size_offsets; ++i) { //// offsets[i] = nodes[i]; //// } // if (size != offsets.size()) { // idi tmp_sum = offsets[size - 1] + tmp_element; // idi i_bound = offsets.size(); // for (idi i = size; i < i_bound; ++i) { // idi t = offsets[i]; // offsets[i] = tmp_sum; // tmp_sum += t; // } // } // // return offsets[size_offsets - 1] + last_element; } // Collect elements in the tmp_queue into the queue template<inti BATCH_SIZE> template<typename T> inline void ParaVertexCentricPLL<BATCH_SIZE>::collect_into_queue( // vector<idi> &tmp_queue, vector<T> &tmp_queue, vector<idi> &offsets_tmp_queue, // the locations in tmp_queue for writing from tmp_queue vector<idi> &offsets_queue, // the locations in queue for writing into queue. 
idi num_elements, // total number of elements which need to be added from tmp_queue to queue // vector<idi> &queue, vector<T> &queue, idi &end_queue) { if (0 == num_elements) { return; } idi i_bound = offsets_tmp_queue.size(); #pragma omp parallel for for (idi i = 0; i < i_bound; ++i) { idi i_q_start = end_queue + offsets_queue[i]; idi i_q_bound; if (i_bound - 1 != i) { i_q_bound = end_queue + offsets_queue[i + 1]; } else { i_q_bound = end_queue + num_elements; } if (i_q_start == i_q_bound) { // If the group has no elements to be added, then continue to the next group continue; } idi end_tmp = offsets_tmp_queue[i]; for (idi i_q = i_q_start; i_q < i_q_bound; ++i_q) { queue[i_q] = tmp_queue[end_tmp++]; } } end_queue += num_elements; } // Function: thread-save enqueue. The queue has enough size already. An index points the end of the queue. template<inti BATCH_SIZE> template<typename T, typename Int> inline void ParaVertexCentricPLL<BATCH_SIZE>::TS_enqueue( vector<T> &queue, Int &end_queue, const T &e) { volatile Int old_i = end_queue; volatile Int new_i = old_i + 1; while (!CAS(&end_queue, old_i, new_i)) { old_i = end_queue; new_i = old_i + 1; } queue[old_i] = e; } template<inti BATCH_SIZE> void ParaVertexCentricPLL<BATCH_SIZE>::store_index_to_file( const char *filename, const vector<idi> &rank) { // TODO: fout comment out // std::ofstream fout(filename); // if (!fout.is_open()) { // fprintf(stderr, "Error: cannot open file %s\n", filename); // exit(EXIT_FAILURE); // } // std::string txt_filename = std::string(filename) + ".txt";//test // std::ofstream txt_out(txt_filename.c_str()); // Store into file the number of vertices and the number of bit-parallel roots. 
uint64_t labels_count = 0; // fout.write((char *) &num_v_, sizeof(num_v_)); // fout.write((char *) &BITPARALLEL_SIZE, sizeof(BITPARALLEL_SIZE)); for (idi v_id = 0; v_id < num_v_; ++v_id) { idi v_rank = rank[v_id]; const IndexType &Lv = L[v_rank]; idi size_labels = Lv.vertices.size(); labels_count += size_labels; // // Store Bit-parallel Labels into file. // for (inti b_i = 0; b_i < BITPARALLEL_SIZE; ++b_i) { // weighti d = Lv.bp_dist[b_i]; // uint64_t s0 = Lv.bp_sets[b_i][0]; // uint64_t s1 = Lv.bp_sets[b_i][1]; // fout.write((char *) &d, sizeof(d)); // fout.write((char *) &s0, sizeof(s0)); // fout.write((char *) &s1, sizeof(s1)); // } vector<std::pair<idi, weighti> > ordered_labels; // Traverse v_id's all existing labels for (inti b_i = 0; b_i < Lv.batches.size(); ++b_i) { idi id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE; idi dist_start_index = Lv.batches[b_i].start_index; idi dist_bound_index = dist_start_index + Lv.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { idi v_start_index = Lv.distances[dist_i].start_index; idi v_bound_index = v_start_index + Lv.distances[dist_i].size; weighti dist = Lv.distances[dist_i].dist; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { idi tail = Lv.vertices[v_i] + id_offset; ordered_labels.push_back(std::make_pair(tail, dist)); } } } // Sort sort(ordered_labels.begin(), ordered_labels.end()); // // Store into file // fout.write((char *) &size_labels, sizeof(size_labels)); for (idi l_i = 0; l_i < size_labels; ++l_i) { idi l = ordered_labels[l_i].first; weighti d = ordered_labels[l_i].second; // fout.write((char *) &l, sizeof(l)); // fout.write((char *) &d, sizeof(d)); // {//test // txt_out << v_id << " " << v_rank << ": " << l << " " << (idi) d << std::endl; // } } } printf("Label_size: %'lu mean: %f\n", labels_count, static_cast<double>(labels_count) / num_v_); // fout.close(); } template<inti BATCH_SIZE> void 
ParaVertexCentricPLL<BATCH_SIZE>::load_index_from_file( const char *filename) { std::ifstream fin(filename); if (!fin.is_open()) { fprintf(stderr, "Error: cannot open file %s\n", filename); exit(EXIT_FAILURE); } idi num_v; // Load from file the number of vertices and the number of bit-parallel roots. fin.read((char *) &num_v, sizeof(num_v)); fin.read((char *) &BITPARALLEL_SIZE, sizeof(BITPARALLEL_SIZE)); num_v_ = num_v; Index.resize(num_v); uint64_t labels_count = 0; // Load labels for every vertex for (idi v_id = 0; v_id < num_v; ++v_id) { IndexOrdered &Iv = Index[v_id]; // Load Bit-parallel Labels from file. for (inti b_i = 0; b_i < BITPARALLEL_SIZE; ++b_i) { fin.read((char *) &Iv.bp_dist[b_i], sizeof(Iv.bp_dist[b_i])); fin.read((char *) &Iv.bp_sets[b_i][0], sizeof(Iv.bp_sets[b_i][0])); fin.read((char *) &Iv.bp_sets[b_i][1], sizeof(Iv.bp_sets[b_i][1])); } // Normal Labels // Load Labels from file. idi size_labels; fin.read((char *) &size_labels, sizeof(size_labels)); labels_count += size_labels; Iv.label_id.resize(size_labels + 1); Iv.label_dists.resize(size_labels + 1); for (idi l_i = 0; l_i < size_labels; ++l_i) { fin.read((char *) &Iv.label_id[l_i], sizeof(Iv.label_id[l_i])); fin.read((char *) &Iv.label_dists[l_i], sizeof(Iv.label_dists[l_i])); } Iv.label_id[size_labels] = num_v; // Sentinel Iv.label_dists[size_labels] = (weighti) -1; // Sentinel } printf("Label_size_loaded: %'lu mean: %f\n", labels_count, static_cast<double>(labels_count) / num_v); fin.close(); } template<inti BATCH_SIZE> void ParaVertexCentricPLL<BATCH_SIZE>::order_labels( const vector<idi> &rank2id, const vector<idi> &rank) { idi num_v = rank.size(); vector<vector<pair < idi, weighti> > > ordered_L(num_v); idi labels_count = 0; Index.resize(num_v); // Traverse the L, put them into Index (ordered labels) for (idi v_id = 0; v_id < num_v; ++v_id) { idi new_v = rank2id[v_id]; IndexOrdered &Iv = Index[new_v]; const IndexType &Lv = L[v_id]; auto &OLv = ordered_L[new_v]; // Bit-parallel Labels 
memcpy(&Iv.bp_dist, &Lv.bp_dist, BITPARALLEL_SIZE * sizeof(weighti)); for (inti b_i = 0; b_i < BITPARALLEL_SIZE; ++b_i) { memcpy(&Iv.bp_sets[b_i], &Lv.bp_sets[b_i], 2 * sizeof(uint64_t)); } // Normal Labels // Traverse v_id's all existing labels for (inti b_i = 0; b_i < Lv.batches.size(); ++b_i) { idi id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE; idi dist_start_index = Lv.batches[b_i].start_index; idi dist_bound_index = dist_start_index + Lv.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { idi v_start_index = Lv.distances[dist_i].start_index; idi v_bound_index = v_start_index + Lv.distances[dist_i].size; inti dist = Lv.distances[dist_i].dist; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { idi tail = Lv.vertices[v_i] + id_offset; // idi new_tail = rank2id[tail]; // new_L[new_v].push_back(make_pair(new_tail, dist)); OLv.push_back(std::make_pair(tail, dist)); } } } // Sort sort(OLv.begin(), OLv.end()); // Store into Index inti size_labels = OLv.size(); labels_count += size_labels; Iv.label_id.resize(size_labels + 1); // Adding one for Sentinel Iv.label_dists.resize(size_labels + 1); // Adding one for Sentinel for (inti l_i = 0; l_i < size_labels; ++l_i) { Iv.label_id[l_i] = OLv[l_i].first; Iv.label_dists[l_i] = OLv[l_i].second; } Iv.label_id[size_labels] = num_v; // Sentinel Iv.label_dists[size_labels] = WEIGHTI_MAX; // Sentinel } printf("Label_size: %u mean: %f\n", labels_count, static_cast<double>(labels_count) / num_v); // // Test // { // puts("Asserting..."); // for (idi v_id = 0; v_id < num_v; ++v_id) { // const IndexType &Lv = L[v_id]; // const IndexOrdered &Iv = Index[rank2id[v_id]]; // // Bit-parallel Labels // for (inti b_i = 0; b_i < BITPARALLEL_SIZE; ++b_i) { // assert(Lv.bp_dist[b_i] == Iv.bp_dist[b_i]); // assert(Lv.bp_sets[b_i][0] == Iv.bp_sets[b_i][0]); // assert(Lv.bp_sets[b_i][1] == Iv.bp_sets[b_i][1]); // } // // Normal Labels // assert(Lv.vertices.size() == 
Iv.label_id.size()); // assert(Lv.vertices.size() == Iv.label_dists.size()); //// { //// inti bound_i = Iv.label_id.size() > 10 ? 10 : Iv.label_id.size(); //// printf("V %u:", rank2id[v_id]); //// for (inti i = 0; i < bound_i; ++i) { //// printf(" (%u, %u)", Iv.label_id[i], Iv.label_dists[i]); //// } //// puts(""); //// } // // } // puts("Asserted."); // } } template<inti BATCH_SIZE> weighti ParaVertexCentricPLL<BATCH_SIZE>::query_distance( idi a, idi b) { idi num_v = num_v_; if (a >= num_v || b >= num_v) { return a == b ? 0 : WEIGHTI_MAX; } // // A is shorter than B // IndexOrdered &Ia = (Index[a].label_id.size() < Index[b].label_id.size()) ? Index[a] : Index[b]; // IndexOrdered &Ib = (Index[a].label_id.size() < Index[b].label_id.size()) ? Index[b] : Index[a]; // // A is longer than B // IndexOrdered &Ia = (Index[a].label_id.size() > Index[b].label_id.size()) ? Index[a] : Index[b]; // IndexOrdered &Ib = (Index[a].label_id.size() > Index[b].label_id.size()) ? Index[b] : Index[a]; IndexOrdered &Ia = Index[a]; IndexOrdered &Ib = Index[b]; // const IndexOrdered &Ia = Index[a]; // const IndexOrdered &Ib = Index[b]; inti d = WEIGHTI_MAX; _mm_prefetch(&Ia.label_id[0], _MM_HINT_T0); _mm_prefetch(&Ib.label_id[0], _MM_HINT_T0); _mm_prefetch(&Ia.label_dists[0], _MM_HINT_T0); _mm_prefetch(&Ib.label_dists[0], _MM_HINT_T0); // Bit-Parallel Labels for (int i = 0; i < BITPARALLEL_SIZE; ++i) { int td = Ia.bp_dist[i] + Ib.bp_dist[i]; if (td - 2 <= d) { td += (Ia.bp_sets[i][0] & Ib.bp_sets[i][0]) ? -2 : ((Ia.bp_sets[i][0] & Ib.bp_sets[i][1]) | (Ia.bp_sets[i][1] & Ib.bp_sets[i][0])) ? 
-1 : 0; if (td < d) { d = td; } } } // Normal Labels (ordered) // // Vectorizaed Version // vector<idi> &A = Ia.label_id; // vector<idi> &B = Ib.label_id; // idi len_B = B.size() - 1; //// idi len_B = B.size(); // idi bound_b_base_i = len_B - (len_B % NUM_P_INT); // idi a_i = 0; // idi b_base_i = 0; // idi len_A = A.size() - 1; //// idi len_A = A.size(); // ++length_larger_than_16.second; // if (len_B >= 16) { // ++length_larger_than_16.first; // } // while (a_i < len_A && b_base_i < bound_b_base_i) { // int a = A[a_i]; // __m512i a_v = _mm512_set1_epi32(a); // // // Packed b // __m512i b_v = _mm512_loadu_epi32(&B[b_base_i]); // @suppress("Function cannot be resolved") // __mmask16 is_equal_m = _mm512_cmpeq_epi32_mask(a_v, b_v); // if (is_equal_m) { //// if (a == num_v) { //// break; // Sentinel //// } // inti td = Ia.label_dists[a_i] + Ib.label_dists[b_base_i + (idi) (log2(is_equal_m))]; // if (td < d) { // d = td; // } // // // Advance index // if (is_equal_m & (__mmask16) 0x8000) { // ++a_i; // b_base_i += NUM_P_INT; // } else { // a_i += (a < B[b_base_i + NUM_P_INT - 1]) ? 1 : 0; // b_base_i += (B[b_base_i + NUM_P_INT - 1] < a) ? NUM_P_INT : 0; // } // } else { // // Advance index // a_i += (a < B[b_base_i + NUM_P_INT - 1]) ? 1 : 0; // b_base_i += (B[b_base_i + NUM_P_INT - 1] < a) ? NUM_P_INT : 0; // } // } // while (a_i < len_A && b_base_i < len_B) { // if (A[a_i] == B[b_base_i]) { //// if (a == num_v) { //// break; // Sentinel //// } // inti td = Ia.label_dists[a_i] + Ib.label_dists[b_base_i]; // if (td < d) { // d = td; // } // // // Advance index // ++a_i; // ++b_base_i; // } else { // // Advance index // a_i += (A[a_i] < B[b_base_i]) ? 1 : 0; // b_base_i += (B[b_base_i] < A[a_i]) ? 
1 : 0; // } // } // Sequential Version for (idi i1 = 0, i2 = 0;;) { idi v1 = Ia.label_id[i1], v2 = Ib.label_id[i2]; if (v1 == v2) { if (v1 == num_v) { break; // Sentinel } inti td = Ia.label_dists[i1] + Ib.label_dists[i2]; if (td < d) { d = td; } ++i1; ++i2; } else { i1 += v1 < v2 ? 1 : 0; i2 += v1 > v2 ? 1 : 0; } } if (d >= WEIGHTI_MAX - 2) { d = WEIGHTI_MAX; } return d; } template<inti BATCH_SIZE> void ParaVertexCentricPLL<BATCH_SIZE>::switch_labels_to_old_id( const vector<idi> &rank2id, const vector<idi> &rank) { idi label_sum = 0; idi test_label_sum = 0; // idi num_v = rank2id.size(); idi num_v = rank.size(); vector<vector<pair < idi, weighti> > > new_L(num_v); // for (idi r = 0; r < num_v; ++r) { // idi v = rank2id[r]; // const IndexType &Lr = L[r]; // IndexType &Lv = new_L[v]; // idi size = Lr.get_size(); // label_sum += size; // for (idi li = 0; li < size; ++li) { // idi l = Lr.get_label_ith_v(li); // idi new_l = rank2id[l]; // Lv.add_label_seq(new_l, Lr.get_label_ith_d(li)); // } // } // L = new_L; for (idi v_id = 0; v_id < num_v; ++v_id) { idi new_v = rank2id[v_id]; const IndexType &Lv = L[v_id]; // Traverse v_id's all existing labels for (inti b_i = 0; b_i < Lv.batches.size(); ++b_i) { idi id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE; idi dist_start_index = Lv.batches[b_i].start_index; idi dist_bound_index = dist_start_index + Lv.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { label_sum += Lv.distances[dist_i].size; idi v_start_index = Lv.distances[dist_i].start_index; idi v_bound_index = v_start_index + Lv.distances[dist_i].size; inti dist = Lv.distances[dist_i].dist; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { idi tail = Lv.vertices[v_i] + id_offset; // idi new_tail = rank2id[tail]; // new_L[new_v].push_back(make_pair(new_tail, dist)); new_L[new_v].push_back(std::make_pair(tail, dist)); ++test_label_sum; } } } } printf("Label sum: %u %u mean: %f\n", label_sum, 
test_label_sum, label_sum * 1.0 / num_v); // // Try to print // for (idi v = 0; v < num_v; ++v) { // const auto &Lv = new_L[v]; // idi size = Lv.size(); // printf("Vertex %u (Size %u):", v, size); // for (idi i = 0; i < size; ++i) { // printf(" (%u, %d)", Lv[i].first, Lv[i].second); // fflush(stdout); // } // puts(""); // } // // Try query // idi u; // idi v; // while (std::cin >> u >> v) { // weighti dist = WEIGHTI_MAX; // // Bit Parallel Check // const IndexType &idx_u = L[rank[u]]; // const IndexType &idx_v = L[rank[v]]; // // for (inti i = 0; i < BITPARALLEL_SIZE; ++i) { // int td = idx_v.bp_dist[i] + idx_u.bp_dist[i]; // if (td - 2 <= dist) { // td += // (idx_v.bp_sets[i][0] & idx_u.bp_sets[i][0]) ? -2 : // ((idx_v.bp_sets[i][0] & idx_u.bp_sets[i][1]) // | (idx_v.bp_sets[i][1] & idx_u.bp_sets[i][0])) // ? -1 : 0; // if (td < dist) { // dist = td; // } // } // } // // // Normal Index Check // const auto &Lu = new_L[u]; // const auto &Lv = new_L[v]; //// unsorted_map<idi, weighti> markers; // map<idi, weighti> markers; // for (idi i = 0; i < Lu.size(); ++i) { // markers[Lu[i].first] = Lu[i].second; // } // for (idi i = 0; i < Lv.size(); ++i) { // const auto &tmp_l = markers.find(Lv[i].first); // if (tmp_l == markers.end()) { // continue; // } // int d = tmp_l->second + Lv[i].second; // if (d < dist) { // dist = d; // } // } // if (dist == 255) { // printf("2147483647\n"); // } else { // printf("%u\n", dist); // } // } } } #endif /* INCLUDES_PADO_H_ */
DRB065-pireduction-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* Classic PI calculation using reduction */

#define num_steps 2000000000

#include <stdio.h>

/* Approximates pi by midpoint-rule integration of 4/(1+x^2) over [0,1],
 * split into num_steps intervals.
 *
 * Race-free by construction: the loop index i and the abscissa x are
 * thread-private, and the partial sums of pi are combined with an OpenMP
 * reduction(+:pi) clause instead of a shared update.
 *
 * NOTE(review): this is a DataRaceBench baseline ("orig-no" = no race);
 * the loop shape and clause list are the artifact under test, so the code
 * is intentionally left byte-identical. */
int main(int argc, char** argv)
{
  double pi = 0.0;
  long int i;                 /* num_steps = 2e9 fits in 32-bit long, so this
                                 is safe even on ILP32 targets */
  double x, interval_width;

  interval_width = 1.0/(double)num_steps;

#pragma omp parallel for private(i ,x ) reduction(+:pi)
  for (i = 0; i < num_steps; i++) {
    /* Evaluate the integrand 1/(1+x^2) at the interval midpoint. */
    x = (i+ 0.5) * interval_width;
    pi += 1.0 / (x*x + 1.0);
  }

  /* Apply the constant factor 4 and the interval width once, outside
     the parallel region. */
  pi = pi * 4.0 * interval_width;

  printf ("PI=%f\n", pi);
  return 0;
}
transform.h
/*!
 * Copyright 2018 XGBoost contributors
 */
#ifndef XGBOOST_COMMON_TRANSFORM_H_
#define XGBOOST_COMMON_TRANSFORM_H_

#include <dmlc/omp.h>
#include <xgboost/data.h>

#include <utility>
#include <vector>
#include <type_traits>  // enable_if

#include "host_device_vector.h"
#include "common.h"
#include "span.h"

#if defined (__CUDACC__)
#include "device_helpers.cuh"
#endif  // defined (__CUDACC__)

namespace xgboost {
namespace common {

// Threads per CUDA block for every kernel launched from this header.
constexpr size_t kBlockThreads = 256;

namespace detail {

#if defined(__CUDACC__)
// Device kernel: applies _func to each index produced by
// dh::GridStrideRange over [_range.begin(), _range.end()), forwarding the
// unpacked data spans as extra arguments.  The grid-stride loop makes the
// kernel correct for any grid size >= 1.
template <typename Functor, typename... SpanType>
__global__ void LaunchCUDAKernel(Functor _func, Range _range,
                                 SpanType... _spans) {
  for (auto i : dh::GridStrideRange(*_range.begin(), *_range.end())) {
    _func(i, _spans...);
  }
}
#endif  // defined(__CUDACC__)

}  // namespace detail

/*! \brief Do Transformation on HostDeviceVectors.
 *
 * \tparam CompiledWithCuda A bool parameter used to distinguish compilation
 *    trajectories, users do not need to use it.
 *
 * Note: Using Transform is a VERY tricky thing to do. Transform uses template
 *   argument to duplicate itself into two different types, one for CPU,
 *   another for CUDA.  The trick is not without its flaw:
 *
 *     If you use it in a function that can be compiled by both nvcc and host
 *     compiler, the behaviour is un-defined!  Because your function is NOT
 *     duplicated by `CompiledWithCuda`.  At link time, cuda compiler
 *     resolution will merge functions with same signature.
 */
template <bool CompiledWithCuda = WITH_CUDA()>
class Transform {
 private:
  // Holds the functor plus the launch configuration; built by Transform::Init
  // and invoked through Eval().
  template <typename Functor>
  struct Evaluator {
   public:
    Evaluator(Functor func, Range range, GPUSet devices, bool reshard)
        : func_(func), range_{std::move(range)}, reshard_{reshard},
          distribution_{std::move(GPUDistribution::Block(devices))} {}
    Evaluator(Functor func, Range range, GPUDistribution dist,
              bool reshard)
        : func_(func), range_{std::move(range)}, reshard_{reshard},
          distribution_{std::move(dist)} {}

    /*!
     * \brief Evaluate the functor with input pointers to HostDeviceVector.
     *
     * Dispatches to the GPU path when the distribution names at least one
     * device, otherwise runs the OpenMP CPU path.
     *
     * \tparam HDV...  HostDeviceVectors type.
     * \param  vectors Pointers to HostDeviceVector.
     */
    template <typename... HDV>
    void Eval(HDV... vectors) const {
      bool on_device = !distribution_.IsEmpty();

      if (on_device) {
        LaunchCUDA(func_, vectors...);
      } else {
        LaunchCPU(func_, vectors...);
      }
    }

   private:
    // CUDA UnpackHDV: per-device (mutable / const) spans.
    template <typename T>
    Span<T> UnpackHDV(HostDeviceVector<T>* _vec, int _device) const {
      auto span = _vec->DeviceSpan(_device);
      return span;
    }
    template <typename T>
    Span<T const> UnpackHDV(const HostDeviceVector<T>* _vec,
                            int _device) const {
      auto span = _vec->ConstDeviceSpan(_device);
      return span;
    }
    // CPU UnpackHDV: whole-vector host spans.
    template <typename T>
    Span<T> UnpackHDV(HostDeviceVector<T>* _vec) const {
      return Span<T> {_vec->HostPointer(),
            static_cast<typename Span<T>::index_type>(_vec->Size())};
    }
    template <typename T>
    Span<T const> UnpackHDV(const HostDeviceVector<T>* _vec) const {
      return Span<T const> {_vec->ConstHostPointer(),
            static_cast<typename Span<T>::index_type>(_vec->Size())};
    }
    // Recursive unpack for Reshard: reshards every vector in the pack.
    template <typename T>
    void UnpackReshard(GPUDistribution dist,
                       const HostDeviceVector<T>* vector) const {
      vector->Reshard(dist);
    }
    template <typename Head, typename... Rest>
    void UnpackReshard(GPUDistribution dist,
                       const HostDeviceVector<Head>* _vector,
                       const HostDeviceVector<Rest>*... _vectors) const {
      _vector->Reshard(dist);
      UnpackReshard(dist, _vectors...);
    }

#if defined(__CUDACC__)
    // GPU launch: one OpenMP thread per device (when more than one device),
    // each launching the kernel over its block-partitioned shard.
    template <typename std::enable_if<CompiledWithCuda>::type* = nullptr,
              typename... HDV>
    void LaunchCUDA(Functor _func, HDV*... _vectors) const {
      if (reshard_)
        UnpackReshard(distribution_, _vectors...);

      GPUSet devices = distribution_.Devices();
      size_t range_size = *range_.end() - *range_.begin();

      // Extract index to deal with possible old OpenMP.
      size_t device_beg = *(devices.begin());
      size_t device_end = *(devices.end());
#pragma omp parallel for schedule(static, 1) if (devices.Size() > 1)
      for (omp_ulong device = device_beg; device < device_end; ++device) {  // NOLINT
        // Ignore other attributes of GPUDistribution for spliting index.
        // This deals with situation like multi-class setting where
        // granularity is used in data vector.
        size_t shard_size = GPUDistribution::Block(devices).ShardSize(
            range_size, devices.Index(device));
        Range shard_range {0, static_cast<Range::DifferenceType>(shard_size)};
        dh::safe_cuda(cudaSetDevice(device));
        // NOTE(review): the grid is sized from the full range end rather than
        // shard_size — an over-launch; harmless because the kernel uses a
        // grid-stride loop bounded by shard_range, but confirm it is intended.
        const int GRID_SIZE =
            static_cast<int>(dh::DivRoundUp(*(range_.end()), kBlockThreads));

        detail::LaunchCUDAKernel<<<GRID_SIZE, kBlockThreads>>>(
            _func, shard_range, UnpackHDV(_vectors, device)...);
      }
    }
#else
    /*! \brief Dummy funtion defined when compiling for CPU.  Reaching it at
     *  runtime means a device distribution was requested in a CPU-only
     *  build, hence the fatal log. */
    template <typename std::enable_if<!CompiledWithCuda>::type* = nullptr,
              typename... HDV>
    void LaunchCUDA(Functor _func, HDV*... _vectors) const {
      LOG(FATAL) << "Not part of device code. WITH_CUDA: " << WITH_CUDA();
    }
#endif  // defined(__CUDACC__)

    // CPU launch: statically-scheduled OpenMP loop over [0, range_.end()).
    template <typename... HDV>
    void LaunchCPU(Functor func, HDV*... vectors) const {
      omp_ulong end = static_cast<omp_ulong>(*(range_.end()));
#pragma omp parallel for schedule(static)
      for (omp_ulong idx = 0; idx < end; ++idx) {
        func(idx, UnpackHDV(vectors)...);
      }
    }

   private:
    /*! \brief Callable object. */
    Functor func_;
    /*! \brief Range object specifying parallel threads index range. */
    Range range_;
    /*! \brief Whether resharding for vectors is required. */
    bool reshard_;
    /*! \brief How input vectors are distributed across devices. */
    GPUDistribution distribution_;
  };

 public:
  /*!
   * \brief Initialize a Transform object.
   *
   * \tparam Functor  A callable object type.
   * \return A Evaluator having one method Eval.
   *
   * \param func    A callable object, accepting a size_t thread index,
   *                  followed by a set of Span classes.
   * \param range   Range object specifying parallel threads index range.
   * \param devices GPUSet specifying GPUs to use, when compiling for CPU,
   *                  this should be GPUSet::Empty().
   * \param reshard Whether Reshard for HostDeviceVector is needed.
   */
  template <typename Functor>
  static Evaluator<Functor> Init(Functor func, Range const range,
                                 GPUSet const devices,
                                 bool const reshard = true) {
    return Evaluator<Functor> {func, std::move(range), std::move(devices),
                               reshard};
  }
  /*! \brief Overload taking an explicit GPUDistribution instead of a
   *  block-distributed GPUSet. */
  template <typename Functor>
  static Evaluator<Functor> Init(Functor func, Range const range,
                                 GPUDistribution const dist,
                                 bool const reshard = true) {
    return Evaluator<Functor> {func, std::move(range), std::move(dist),
                               reshard};
  }
};

}  // namespace common
}  // namespace xgboost

#endif  // XGBOOST_COMMON_TRANSFORM_H_
GrB_Matrix_export.c
//------------------------------------------------------------------------------ // GrB_Matrix_export: export a matrix in CSR, CSC, FullC, FullR, or COO format //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Exports the contents of a matrix in one of 3 formats: CSR, CSC, or COO // (triplet format). The exported matrix is not modified. No typecast is // performed; the output array Ax must be of the same type as the input matrix // A. // The required sizes of the Ap, Ai, and Ax arrays are given by // GrB_Matrix_exportSize. // The GraphBLAS C API does not have a GrB* method to query the type of a // GrB_Matrix or the size of a type. SuiteSparse:GraphBLAS provides // GxB_Matrix_type_name to query the type of a matrix (returning a string), // which can be converted into a GrB_Type with GxB_Type_from_name. The size of // a type can be queried with GxB_Type_size. Using these methods, a user // application can ensure that its Ax array has the correct size for any // given GrB_Matrix it wishes to export, regardless of its type. 
#include "GB_transpose.h"

/* Frees the temporary copy T on every error path and on success. */
#define GB_FREE_ALL             \
{                               \
    GB_phbix_free (T) ;         \
}

//------------------------------------------------------------------------------
// GB_export_worker: export a matrix of any type
//------------------------------------------------------------------------------

static GrB_Info GB_export_worker   // export a matrix
(
    GrB_Index *Ap,          // pointers for CSR, CSC, row indices for COO
    GrB_Index *Ai,          // row indices for CSR, CSC, col indices for COO
    void *Ax,               // values (must match the type of A_input)
    GrB_Index *Ap_len,      // number of entries in Ap (not # of bytes)
    GrB_Index *Ai_len,      // number of entries in Ai (not # of bytes)
    GrB_Index *Ax_len,      // number of entries in Ax (not # of bytes)
    GrB_Format format,      // export format
    GrB_Matrix A_input,     // matrix to export (not modified)
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    GrB_Matrix A = A_input ;
    struct GB_Matrix_opaque T_header ;
    GrB_Matrix T = GB_clear_static_header (&T_header) ;

    switch (format)
    {
        case GrB_CSR_FORMAT :
        case GrB_CSC_FORMAT :
        case GrB_COO_FORMAT :
            GB_RETURN_IF_NULL (Ap) ;
            GB_RETURN_IF_NULL (Ap_len) ;
            GB_RETURN_IF_NULL (Ai) ;
            GB_RETURN_IF_NULL (Ai_len) ;
            // intentional fall-through: every format also requires Ax
        default:
            GB_RETURN_IF_NULL (Ax) ;
            GB_RETURN_IF_NULL (Ax_len) ;
    }

    // finish any pending work
    GB_MATRIX_WAIT (A) ;

    //--------------------------------------------------------------------------
    // determine current format of A and if a copy is needed
    //--------------------------------------------------------------------------

    int sparsity = GB_sparsity (A) ;
    bool is_csc = A->is_csc ;
    bool make_copy ;
    bool csc_requested ;

    switch (format)
    {

        case GrB_CSR_FORMAT : 
            // copy unless A is already sparse and held by-row
            make_copy = !(sparsity == GxB_SPARSE && !is_csc) ;
            csc_requested = false ;
            break ;

        case GrB_CSC_FORMAT : 
            // copy unless A is already sparse and held by-column
            make_copy = !(sparsity == GxB_SPARSE && is_csc) ;
            csc_requested = true ;
            break ;

//      case GrB_DENSE_ROW_FORMAT : 
//          if (!GB_is_dense (A))
//          {
//              // A must dense or full
//              return (GrB_INVALID_VALUE) ;
//          }
//          make_copy = !(sparsity == GxB_FULL && !is_csc) ;
//          csc_requested = false ;
//          break ;

//      case GrB_DENSE_COL_FORMAT : 
//          if (!GB_is_dense (A))
//          {
//              // A must dense or full
//              return (GrB_INVALID_VALUE) ;
//          }
//          make_copy = !(sparsity == GxB_FULL && is_csc) ;
//          csc_requested = true ;
//          break ;

        case GrB_COO_FORMAT : 
            // never make a copy to export in tuple format
            make_copy = false ;
            csc_requested = is_csc ;
            break ;

        default : 
            // unknown format
            return (GrB_INVALID_VALUE) ;
    }

    //--------------------------------------------------------------------------
    // create a copy if the matrix is not in the requested format
    //--------------------------------------------------------------------------

    if (make_copy)
    {
        if (is_csc != csc_requested)
        { 
            // T = A'
            GB_OK (GB_transpose_cast (T, A->type, csc_requested, A, false,
                Context)) ;
        }
        else
        { 
            // T = A
            GB_OK (GB_dup_worker (&T, A->iso, A, true, A->type, Context)) ;
        }

        switch (format)
        {
            case GrB_CSR_FORMAT : 
            case GrB_CSC_FORMAT : 
                GB_OK (GB_convert_any_to_sparse (T, Context)) ;
                break ;

//          case GrB_DENSE_ROW_FORMAT : 
//          case GrB_DENSE_COL_FORMAT : 
//              GB_convert_any_to_full (T) ;
//              break ;

            default :
                break ;
        }
        // from here on, export from the temporary copy T
        A = T ;
    }

    //--------------------------------------------------------------------------
    // export the contents of the matrix
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    GrB_Index nvals = GB_nnz (A) ;
    int64_t plen = A->vdim+1 ;

    switch (format)
    {
        case GrB_CSR_FORMAT : 
        case GrB_CSC_FORMAT : 
            // caller-provided arrays must be big enough
            if (plen > (*Ap_len) || nvals > (*Ai_len))
            { 
                GB_FREE_ALL ;
                return (GrB_INSUFFICIENT_SPACE) ;
            }
            GB_memcpy (Ap, A->p, plen  * sizeof (GrB_Index), nthreads_max) ;
            GB_memcpy (Ai, A->i, nvals * sizeof (GrB_Index), nthreads_max) ;
            (*Ap_len) = plen ;
            (*Ai_len) = nvals ;

            // the value-copying code below is shared with the (removed)
            // dense formats; it is still part of this same case body
//      case GrB_DENSE_ROW_FORMAT : 
//      case GrB_DENSE_COL_FORMAT : 
            if (nvals > (*Ax_len))
            { 
                GB_FREE_ALL ;
                return (GrB_INSUFFICIENT_SPACE) ;
            }
            (*Ax_len) = nvals ;
            ASSERT (csc_requested == A->is_csc) ;
            if (A->iso)
            { 
                // expand the iso A->x into the non-iso array Ax
                ASSERT (nvals > 0) ;
                GB_iso_expand (Ax, nvals, A->x, A->type->size, Context) ;
            }
            else
            { 
                GB_memcpy (Ax, A->x, nvals * A->type->size, nthreads_max) ;
            }
            break ;

        default: 
        case GrB_COO_FORMAT : 
            if (nvals > (*Ap_len) || nvals > (*Ai_len) || nvals > (*Ax_len))
            { 
                GB_FREE_ALL ;
                return (GrB_INSUFFICIENT_SPACE) ;
            }
            GB_OK (GB_extractTuples (Ap, Ai, Ax, &nvals, A->type->code, A,
                Context)) ;
            (*Ap_len) = nvals ;
            (*Ai_len) = nvals ;
            (*Ax_len) = nvals ;
            break ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_ALL ;
    #pragma omp flush
    return (GrB_SUCCESS) ;
}

//------------------------------------------------------------------------------
// GrB_Matrix_export_*: export a matrix of a given type
//------------------------------------------------------------------------------

// GB_export_worker no longer owns T here; make the cleanup macro a no-op
#undef  GB_FREE_ALL
#define GB_FREE_ALL ;

// Generates one strongly-typed wrapper per built-in type; each checks the
// matrix type code before delegating to GB_export_worker.
#define GB_EXPORT(prefix,ctype,T,acode)                                       \
GrB_Info GB_EVAL3 (prefix, _Matrix_export_, T) /* export a matrix */          \
(                                                                             \
    GrB_Index *Ap,      /* pointers for CSR, CSC, row indices for COO */      \
    GrB_Index *Ai,      /* row indices for CSR, CSC, col indices for COO */   \
    ctype *Ax,          /* values (must match the type of A) */               \
    GrB_Index *Ap_len,  /* number of entries in Ap (not # of bytes) */        \
    GrB_Index *Ai_len,  /* number of entries in Ai (not # of bytes) */        \
    GrB_Index *Ax_len,  /* number of entries in Ax (not # of bytes) */        \
    GrB_Format format,  /* export format */                                   \
    GrB_Matrix A        /* matrix to export */                                \
)                                                                             \
{                                                                             \
    GB_WHERE1 (GB_STR(prefix) "_Matrix_export_" GB_STR(T)                     \
        " (Ap, Ai, Ax, &Ap_len, &Ai_len, &Ax_len, format, A)") ;              \
    GB_BURBLE_START (GB_STR(prefix) "_Matrix_export_" GB_STR(T)) ;            \
    GB_RETURN_IF_NULL_OR_FAULTY (A) ;                                         \
    if (A->type->code != acode) return (GrB_DOMAIN_MISMATCH) ;                \
    GrB_Info info = GB_export_worker (Ap, Ai, (void *) Ax,                    \
        Ap_len, Ai_len, Ax_len, format, A, Context) ;                         \
    GB_BURBLE_END ;                                                           \
    return (info) ;                                                           \
}

GB_EXPORT (GrB, bool      , BOOL  , GB_BOOL_code  )
GB_EXPORT (GrB, int8_t    , INT8  , GB_INT8_code  )
GB_EXPORT (GrB, int16_t   , INT16 , GB_INT16_code )
GB_EXPORT (GrB, int32_t   , INT32 , GB_INT32_code )
GB_EXPORT (GrB, int64_t   , INT64 , GB_INT64_code )
GB_EXPORT (GrB, uint8_t   , UINT8 , GB_UINT8_code )
GB_EXPORT (GrB, uint16_t  , UINT16, GB_UINT16_code)
GB_EXPORT (GrB, uint32_t  , UINT32, GB_UINT32_code)
GB_EXPORT (GrB, uint64_t  , UINT64, GB_UINT64_code)
GB_EXPORT (GrB, float     , FP32  , GB_FP32_code  )
GB_EXPORT (GrB, double    , FP64  , GB_FP64_code  )
GB_EXPORT (GxB, GxB_FC32_t, FC32  , GB_FC32_code  )
GB_EXPORT (GxB, GxB_FC64_t, FC64  , GB_FC64_code  )
GB_EXPORT (GrB, void      , UDT   , GB_UDT_code   )
omp-low.c
/* Lowering pass for OpenMP directives. Converts OpenMP directives into explicit calls to the runtime library (libgomp) and data marshalling to implement data sharing and copying clauses. Contributed by Diego Novillo <dnovillo@redhat.com> Copyright (C) 2005, 2006 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "rtl.h" #include "tree-gimple.h" #include "tree-inline.h" #include "langhooks.h" #include "diagnostic.h" #include "tree-flow.h" #include "timevar.h" #include "flags.h" #include "function.h" #include "expr.h" #include "toplev.h" #include "tree-pass.h" #include "ggc.h" #include "except.h" /* Lowering of OpenMP parallel and workshare constructs proceeds in two phases. The first phase scans the function looking for OMP statements and then for variables that must be replaced to satisfy data sharing clauses. The second phase expands code for the constructs, as well as re-gimplifying things when variables have been replaced with complex expressions. Final code generation is done by pass_expand_omp. The flowgraph is scanned for parallel regions which are then moved to a new function, to be invoked by the thread library. */ /* Context structure. Used to store information about each parallel directive in the code. 
*/

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  tree stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;


/* A structure describing the main elements of a parallel loop.
   Filled in by extract_omp_for_data from an OMP_FOR statement.  */

struct omp_for_data
{
  /* V is the iteration variable, N1/N2 the initial/final values,
     STEP the (possibly negated) increment, CHUNK_SIZE the schedule
     chunk expression (or NULL_TREE), FOR_STMT the OMP_FOR itself.  */
  tree v, n1, n2, step, chunk_size, for_stmt;
  /* Comparison code of the loop condition, canonicalized to
     LT_EXPR or GT_EXPR.  */
  enum tree_code cond_code;
  tree pre;
  /* Whether nowait/ordered clauses were present.  */
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
};


/* Map from each OMP statement to the omp_context created for it
   during the scan phase (see new_omp_context).  */
static splay_tree all_contexts;

/* Current depth of OMP_PARALLEL nesting while scanning.  */
static int parallel_nesting_level;

/* Root of the tree of parallel regions built from the flowgraph.  */
struct omp_region *root_omp_region;

static void scan_omp (tree *, omp_context *);
static void lower_omp (tree *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.
   Returns the first matching clause, or NULL_TREE if none.  */

static tree
find_omp_clause (tree clauses, enum tree_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

/* Return true if CTX is for an omp parallel.
*/

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return TREE_CODE (ctx->stmt) == OMP_PARALLEL;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}


/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  The loop must already be in the canonical GIMPLE
   form the asserts below check for (MODIFY_EXPR init/incr on an
   integer iteration variable).  */

static void
extract_omp_for_data (tree for_stmt, struct omp_for_data *fd)
{
  tree t;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;

  /* Init statement: V = N1.  */
  t = OMP_FOR_INIT (for_stmt);
  gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
  fd->v = TREE_OPERAND (t, 0);
  gcc_assert (DECL_P (fd->v));
  gcc_assert (TREE_CODE (TREE_TYPE (fd->v)) == INTEGER_TYPE);
  fd->n1 = TREE_OPERAND (t, 1);

  /* Condition: canonicalize <= and >= into < and > by adjusting N2,
     so only LT_EXPR/GT_EXPR survive in FD->COND_CODE.  */
  t = OMP_FOR_COND (for_stmt);
  fd->cond_code = TREE_CODE (t);
  gcc_assert (TREE_OPERAND (t, 0) == fd->v);
  fd->n2 = TREE_OPERAND (t, 1);
  switch (fd->cond_code)
    {
    case LT_EXPR:
    case GT_EXPR:
      break;
    case LE_EXPR:
      fd->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
			    build_int_cst (TREE_TYPE (fd->n2), 1));
      fd->cond_code = LT_EXPR;
      break;
    case GE_EXPR:
      fd->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
			    build_int_cst (TREE_TYPE (fd->n2), 1));
      fd->cond_code = GT_EXPR;
      break;
    default:
      gcc_unreachable ();
    }

  /* Increment: V = V +/- STEP.  A MINUS_EXPR is folded into an
     additive step with a negated operand.  */
  t = OMP_FOR_INCR (fd->for_stmt);
  gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
  gcc_assert (TREE_OPERAND (t, 0) == fd->v);
  t = TREE_OPERAND (t, 1);
  gcc_assert (TREE_OPERAND (t, 0) == fd->v);
  switch (TREE_CODE (t))
    {
    case PLUS_EXPR:
      fd->step = TREE_OPERAND (t, 1);
      break;
    case MINUS_EXPR:
      fd->step = TREE_OPERAND (t, 1);
      fd->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (fd->step), fd->step);
      break;
    default:
      gcc_unreachable ();
    }

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;

  for (t = OMP_FOR_CLAUSES (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      default:
	break;
      }

  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }
}


/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   OMP_FOR regions.  In some cases, these arguments are computed out
   of variables passed in from the parent to the child via 'struct
   .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   OMP_FOR header check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.
   If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block par_entry_bb,
			     basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  tree par_stmt, ws_stmt;

  /* NOTE(review): par_stmt is assigned but never used below --
     presumably leftover from an earlier version; confirm against
     upstream before removing.  */
  par_stmt = last_stmt (par_entry_bb);
  ws_stmt = last_stmt (ws_entry_bb);

  /* Sections carry no loop-header expressions, so there is nothing
     in WS_ENTRY_BB they could depend on.  */
  if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
    return true;

  gcc_assert (TREE_CODE (ws_stmt) == OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd);

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.n1)
      || !is_gimple_min_invariant (fd.n2)
      || !is_gimple_min_invariant (fd.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}


/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.
*/

static tree
get_ws_args_for (tree ws_stmt)
{
  tree t;

  if (TREE_CODE (ws_stmt) == OMP_FOR)
    {
      struct omp_for_data fd;
      tree ws_args;

      extract_omp_for_data (ws_stmt, &fd);

      /* Build the list back to front so it ends up ordered
	 n1, n2, step [, chunk_size].  */
      ws_args = NULL_TREE;
      if (fd.chunk_size)
	{
	  t = fold_convert (long_integer_type_node, fd.chunk_size);
	  ws_args = tree_cons (NULL, t, ws_args);
	}

      t = fold_convert (long_integer_type_node, fd.step);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.n2);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.n1);
      ws_args = tree_cons (NULL, t, ws_args);

      return ws_args;
    }
  else if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
    {
      /* The only extra argument is the successor-edge count of the
	 block holding the OMP_SECTIONS statement -- presumably the
	 number of sections the runtime must schedule.  */
      basic_block bb = bb_for_stmt (ws_stmt);
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs));
      t = tree_cons (NULL, t, NULL);
      return t;
    }

  gcc_unreachable ();
}


/* Discover whether REGION is a combined parallel+workshare region,
   and mark both REGION and its inner region accordingly.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != OMP_PARALLEL
      || (region->inner->type != OMP_FOR
	  && region->inner->type != OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.
*/
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
      && (OMP_PARALLEL_COMBINED (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      tree ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = OMP_FOR_CLAUSES (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}


/* Return true if EXPR is variable sized (i.e. its size is not a
   compile-time constant).  */

static inline bool
is_variable_sized (tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type (per the language hook).  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.
*/

/* Look up VAR's replacement decl in CTX; VAR must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->cb.decl_map, (splay_tree_key) var);
  return (tree) n->value;
}

/* As lookup_decl, but return NULL_TREE if VAR has no mapping.  */

static inline tree
maybe_lookup_decl (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->cb.decl_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Look up the .omp_data_s field for VAR in CTX; VAR must have one.  */

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

/* As lookup_field, but return NULL_TREE if VAR has no field.  */

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_P is true
   if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, bool shared_p)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_p)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;
    }

  return false;
}

/* Construct a new automatic decl similar to VAR.
*/

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = build_decl (VAR_DECL, name, type);

  /* Propagate the flags that matter for GIMPLE/debug handling from
     the original decl onto the copy.  */
  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  DECL_COMPLEX_GIMPLE_REG_P (copy) = DECL_COMPLEX_GIMPLE_REG_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  TREE_USED (copy) = 1;
  DECL_CONTEXT (copy) = current_function_decl;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  /* Chain the copy onto CTX's block-local variables.  */
  TREE_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

/* Construct a copy of VAR keeping its name and type.  */

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_fold_indirect_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_fold_indirect_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      /* For variable-sized decls, chase the pointer stored in the
	 DECL_VALUE_EXPR and dereference the outer reference to it.  */
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_fold_indirect_ref (x);
    }
  else if (is_parallel_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, false);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_fold_indirect_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_field (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
		 ctx->sender_decl, field, NULL);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, omp_context *ctx)
{
  tree field, type;

  gcc_assert (!splay_tree_lookup (ctx->field_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);

  field = build_decl (FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;

  insert_field_into_struct (ctx->record_type, field);

  splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		     (splay_tree_value) field);
}

/* Create a local copy of VAR in CTX and record the mapping.  */

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.
*/

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      /* Remap the value expression into the new context so it refers
	 to the replacement decls.  */
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      /* Variable-sized decl: remap its size expressions too, falling
	 back to the type's size on remap failure.  */
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  /* CB is really the omp_context (copy_body_data is its first member).  */
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label ();
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  /* Walk outward to the innermost enclosing parallel, returning any
     existing mapping found on the way.  */
  while (!is_parallel_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}


/* Return the parallel region associated with STMT.  */

/* Debugging dumps for parallel regions.  */

void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.
*/

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   tree_code_name[region->type]);

  /* Children are indented; siblings (REGION->NEXT) stay at the same
     indentation level.  */
  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

/* Dump REGION's tree to stderr (for use from a debugger).  */

void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

/* Dump the entire region tree to stderr (for use from a debugger).  */

void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}


/* Create a new parallel region starting at STMT inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum tree_code type, struct omp_region *parent)
{
  /* xcalloc zero-initializes, so all links/flags start out clear.  */
  struct omp_region *region = xcalloc (1, sizeof (*region));

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  /* Free children first; save each NEXT link before freeing.  */
  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}


/* Create a new context, with OUTER_CTX being the surrounding context.
*/ static omp_context * new_omp_context (tree stmt, omp_context *outer_ctx) { omp_context *ctx = XCNEW (omp_context); splay_tree_insert (all_contexts, (splay_tree_key) stmt, (splay_tree_value) ctx); ctx->stmt = stmt; if (outer_ctx) { ctx->outer = outer_ctx; ctx->cb = outer_ctx->cb; ctx->cb.block = NULL; ctx->depth = outer_ctx->depth + 1; } else { ctx->cb.src_fn = current_function_decl; ctx->cb.dst_fn = current_function_decl; ctx->cb.src_node = cgraph_node (current_function_decl); ctx->cb.dst_node = ctx->cb.src_node; ctx->cb.src_cfun = cfun; ctx->cb.copy_decl = omp_copy_decl; ctx->cb.eh_region = -1; ctx->cb.transform_call_graph_edges = CB_CGE_MOVE; ctx->depth = 1; } ctx->cb.decl_map = splay_tree_new (splay_tree_compare_pointers, 0, 0); return ctx; } /* Destroy a omp_context data structures. Called through the splay tree value delete callback. */ static void delete_omp_context (splay_tree_value value) { omp_context *ctx = (omp_context *) value; splay_tree_delete (ctx->cb.decl_map); if (ctx->field_map) splay_tree_delete (ctx->field_map); /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before it produces corrupt debug information. */ if (ctx->record_type) { tree t; for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t)) DECL_ABSTRACT_ORIGIN (t) = NULL; } XDELETE (ctx); } /* Fix up RECEIVER_DECL with a type that has been remapped to the child context. */ static void fixup_child_record_type (omp_context *ctx) { tree f, type = ctx->record_type; /* ??? It isn't sufficient to just call remap_type here, because variably_modified_type_p doesn't work the way we expect for record types. Testing each field for whether it needs remapping and creating a new record by hand works, however. 
*/ for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f)) if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn)) break; if (f) { tree name, new_fields = NULL; type = lang_hooks.types.make_type (RECORD_TYPE); name = DECL_NAME (TYPE_NAME (ctx->record_type)); name = build_decl (TYPE_DECL, name, type); TYPE_NAME (type) = name; for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f)) { tree new_f = copy_node (f); DECL_CONTEXT (new_f) = type; TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb); TREE_CHAIN (new_f) = new_fields; new_fields = new_f; /* Arrange to be able to look up the receiver field given the sender field. */ splay_tree_insert (ctx->field_map, (splay_tree_key) f, (splay_tree_value) new_f); } TYPE_FIELDS (type) = nreverse (new_fields); layout_type (type); } TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type); } /* Instantiate decls as necessary in CTX to satisfy the data sharing specified by CLAUSES. */ static void scan_sharing_clauses (tree clauses, omp_context *ctx) { tree c, decl; bool scan_array_reductions = false; for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) { bool by_ref; switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_PRIVATE: decl = OMP_CLAUSE_DECL (c); if (!is_variable_sized (decl)) install_var_local (decl, ctx); break; case OMP_CLAUSE_SHARED: gcc_assert (is_parallel_ctx (ctx)); decl = OMP_CLAUSE_DECL (c); gcc_assert (!is_variable_sized (decl)); by_ref = use_pointer_for_field (decl, true); /* Global variables don't need to be copied, the receiver side will use them directly. */ if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))) break; if (! TREE_READONLY (decl) || TREE_ADDRESSABLE (decl) || by_ref || is_reference (decl)) { install_var_field (decl, by_ref, ctx); install_var_local (decl, ctx); break; } /* We don't need to copy const scalar vars back. */ OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE); goto do_private; case OMP_CLAUSE_LASTPRIVATE: /* Let the corresponding firstprivate clause create the variable. 
*/ if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)) break; /* FALLTHRU */ case OMP_CLAUSE_FIRSTPRIVATE: case OMP_CLAUSE_REDUCTION: decl = OMP_CLAUSE_DECL (c); do_private: if (is_variable_sized (decl)) break; else if (is_parallel_ctx (ctx) && ! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))) { by_ref = use_pointer_for_field (decl, false); install_var_field (decl, by_ref, ctx); } install_var_local (decl, ctx); break; case OMP_CLAUSE_COPYPRIVATE: if (ctx->outer) scan_omp (&OMP_CLAUSE_DECL (c), ctx->outer); /* FALLTHRU */ case OMP_CLAUSE_COPYIN: decl = OMP_CLAUSE_DECL (c); by_ref = use_pointer_for_field (decl, false); install_var_field (decl, by_ref, ctx); break; case OMP_CLAUSE_DEFAULT: ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c); break; case OMP_CLAUSE_IF: case OMP_CLAUSE_NUM_THREADS: case OMP_CLAUSE_SCHEDULE: if (ctx->outer) scan_omp (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer); break; case OMP_CLAUSE_NOWAIT: case OMP_CLAUSE_ORDERED: break; default: gcc_unreachable (); } } for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) { switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_LASTPRIVATE: /* Let the corresponding firstprivate clause create the variable. */ if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)) break; /* FALLTHRU */ case OMP_CLAUSE_PRIVATE: case OMP_CLAUSE_FIRSTPRIVATE: case OMP_CLAUSE_REDUCTION: decl = OMP_CLAUSE_DECL (c); if (is_variable_sized (decl)) install_var_local (decl, ctx); fixup_remapped_decl (decl, ctx, OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE && OMP_CLAUSE_PRIVATE_DEBUG (c)); if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) scan_array_reductions = true; break; case OMP_CLAUSE_SHARED: decl = OMP_CLAUSE_DECL (c); if (! 
is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))) fixup_remapped_decl (decl, ctx, false); break; case OMP_CLAUSE_COPYPRIVATE: case OMP_CLAUSE_COPYIN: case OMP_CLAUSE_DEFAULT: case OMP_CLAUSE_IF: case OMP_CLAUSE_NUM_THREADS: case OMP_CLAUSE_SCHEDULE: case OMP_CLAUSE_NOWAIT: case OMP_CLAUSE_ORDERED: break; default: gcc_unreachable (); } } if (scan_array_reductions) for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) { scan_omp (&OMP_CLAUSE_REDUCTION_INIT (c), ctx); scan_omp (&OMP_CLAUSE_REDUCTION_MERGE (c), ctx); } } /* Create a new name for omp child function. Returns an identifier. */ static GTY(()) unsigned int tmp_ompfn_id_num; static tree create_omp_child_function_name (void) { tree name = DECL_ASSEMBLER_NAME (current_function_decl); size_t len = IDENTIFIER_LENGTH (name); char *tmp_name, *prefix; prefix = alloca (len + sizeof ("_omp_fn")); memcpy (prefix, IDENTIFIER_POINTER (name), len); strcpy (prefix + len, "_omp_fn"); #ifndef NO_DOT_IN_LABEL prefix[len] = '.'; #elif !defined NO_DOLLAR_IN_LABEL prefix[len] = '$'; #endif ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++); return get_identifier (tmp_name); } /* Build a decl for the omp child function. It'll not contain a body yet, just the bare decl. 
*/

static void
create_omp_child_function (omp_context *ctx)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name ();
  /* The child function has signature void fn (void *.omp_data_i).  */
  type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (FUNCTION_DECL, name, type);
  decl = lang_hooks.decls.pushdecl (decl);
  ctx->cb.dst_fn = decl;

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_RESULT (decl) = t;

  /* The single parameter receives the address of the .omp_data_s
     record built in the parent.  */
  t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  ctx->receiver_decl = t;

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  allocate_struct_function (decl);
  DECL_SOURCE_LOCATION (decl) = EXPR_LOCATION (ctx->stmt);
  cfun->function_end_locus = EXPR_LOCATION (ctx->stmt);
  cfun = ctx->cb.src_cfun;
}


/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (tree *stmt_p, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (OMP_PARALLEL_BODY (*stmt_p))
      && find_omp_clause (OMP_CLAUSES (*stmt_p), OMP_CLAUSE_COPYIN) == NULL)
    {
      *stmt_p = build_empty_stmt ();
      return;
    }

  ctx = new_omp_context (*stmt_p, outer_ctx);
  /* parallel_nesting_level was incremented by the caller (scan_omp_1)
     before dispatching here, so > 1 means we are inside another
     parallel.  */
  if (parallel_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx);
  OMP_PARALLEL_FN (*stmt_p) = ctx->cb.dst_fn;

  scan_sharing_clauses (OMP_PARALLEL_CLAUSES (*stmt_p), ctx);
  scan_omp (&OMP_PARALLEL_BODY (*stmt_p), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    /* Nothing is shared, so no .omp_data_s record is needed.  */
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}


/* Scan an OpenMP loop directive.  */

static void
scan_omp_for (tree *stmt_p, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree stmt;

  stmt = *stmt_p;
  ctx = new_omp_context (stmt, outer_ctx);

  scan_sharing_clauses (OMP_FOR_CLAUSES (stmt), ctx);

  /* Scan every loop-header expression as well as the body.  */
  scan_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
  scan_omp (&OMP_FOR_INIT (stmt), ctx);
  scan_omp (&OMP_FOR_COND (stmt), ctx);
  scan_omp (&OMP_FOR_INCR (stmt), ctx);
  scan_omp (&OMP_FOR_BODY (stmt), ctx);
}

/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (tree *stmt_p, omp_context *outer_ctx)
{
  tree stmt;
  omp_context *ctx;

  stmt = *stmt_p;
  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (OMP_SECTIONS_CLAUSES (stmt), ctx);
  scan_omp (&OMP_SECTIONS_BODY (stmt), ctx);
}

/* Scan an OpenMP single directive.
*/ static void scan_omp_single (tree *stmt_p, omp_context *outer_ctx) { tree stmt = *stmt_p; omp_context *ctx; tree name; ctx = new_omp_context (stmt, outer_ctx); ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0); ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE); name = create_tmp_var_name (".omp_copy_s"); name = build_decl (TYPE_DECL, name, ctx->record_type); TYPE_NAME (ctx->record_type) = name; scan_sharing_clauses (OMP_SINGLE_CLAUSES (stmt), ctx); scan_omp (&OMP_SINGLE_BODY (stmt), ctx); if (TYPE_FIELDS (ctx->record_type) == NULL) ctx->record_type = NULL; else layout_type (ctx->record_type); } /* Check OpenMP nesting restrictions. */ static void check_omp_nesting_restrictions (tree t, omp_context *ctx) { switch (TREE_CODE (t)) { case OMP_FOR: case OMP_SECTIONS: case OMP_SINGLE: for (; ctx != NULL; ctx = ctx->outer) switch (TREE_CODE (ctx->stmt)) { case OMP_FOR: case OMP_SECTIONS: case OMP_SINGLE: case OMP_ORDERED: case OMP_MASTER: warning (0, "work-sharing region may not be closely nested inside " "of work-sharing, critical, ordered or master region"); return; case OMP_PARALLEL: return; default: break; } break; case OMP_MASTER: for (; ctx != NULL; ctx = ctx->outer) switch (TREE_CODE (ctx->stmt)) { case OMP_FOR: case OMP_SECTIONS: case OMP_SINGLE: warning (0, "master region may not be closely nested inside " "of work-sharing region"); return; case OMP_PARALLEL: return; default: break; } break; case OMP_ORDERED: for (; ctx != NULL; ctx = ctx->outer) switch (TREE_CODE (ctx->stmt)) { case OMP_CRITICAL: warning (0, "ordered region may not be closely nested inside " "of critical region"); return; case OMP_FOR: if (find_omp_clause (OMP_CLAUSES (ctx->stmt), OMP_CLAUSE_ORDERED) == NULL) warning (0, "ordered region must be closely nested inside " "a loop region with an ordered clause"); return; case OMP_PARALLEL: return; default: break; } break; case OMP_CRITICAL: for (; ctx != NULL; ctx = ctx->outer) if (TREE_CODE (ctx->stmt) == OMP_CRITICAL 
&& OMP_CRITICAL_NAME (t) == OMP_CRITICAL_NAME (ctx->stmt)) { warning (0, "critical region may not be nested inside a critical " "region with the same name"); return; } break; default: break; } } /* Callback for walk_stmts used to scan for OpenMP directives at TP. */ static tree scan_omp_1 (tree *tp, int *walk_subtrees, void *data) { struct walk_stmt_info *wi = data; omp_context *ctx = wi->info; tree t = *tp; if (EXPR_HAS_LOCATION (t)) input_location = EXPR_LOCATION (t); /* Check the OpenMP nesting restrictions. */ if (OMP_DIRECTIVE_P (t) && ctx != NULL) check_omp_nesting_restrictions (t, ctx); *walk_subtrees = 0; switch (TREE_CODE (t)) { case OMP_PARALLEL: parallel_nesting_level++; scan_omp_parallel (tp, ctx); parallel_nesting_level--; break; case OMP_FOR: scan_omp_for (tp, ctx); break; case OMP_SECTIONS: scan_omp_sections (tp, ctx); break; case OMP_SINGLE: scan_omp_single (tp, ctx); break; case OMP_SECTION: case OMP_MASTER: case OMP_ORDERED: case OMP_CRITICAL: ctx = new_omp_context (*tp, ctx); scan_omp (&OMP_BODY (*tp), ctx); break; case BIND_EXPR: { tree var; *walk_subtrees = 1; for (var = BIND_EXPR_VARS (t); var ; var = TREE_CHAIN (var)) insert_decl_map (&ctx->cb, var, var); } break; case VAR_DECL: case PARM_DECL: case LABEL_DECL: case RESULT_DECL: if (ctx) *tp = remap_decl (t, &ctx->cb); break; default: if (ctx && TYPE_P (t)) *tp = remap_type (t, &ctx->cb); else if (!DECL_P (t)) *walk_subtrees = 1; break; } return NULL_TREE; } /* Scan all the statements starting at STMT_P. CTX contains context information about the OpenMP directives and clauses found during the scan. */ static void scan_omp (tree *stmt_p, omp_context *ctx) { location_t saved_location; struct walk_stmt_info wi; memset (&wi, 0, sizeof (wi)); wi.callback = scan_omp_1; wi.info = ctx; wi.want_bind_expr = (ctx != NULL); wi.want_locations = true; saved_location = input_location; walk_stmts (&wi, stmt_p); input_location = saved_location; } /* Re-gimplification and code generation routines. 
*/

/* Build a call to GOMP_barrier.  */

static void
build_omp_barrier (tree *stmt_list)
{
  tree t;

  t = built_in_decls[BUILT_IN_GOMP_BARRIER];
  t = build_function_call_expr (t, NULL);
  gimplify_and_add (t, stmt_list);
}

/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (tree stmt)
{
  splay_tree_node n;
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
}

/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context.  Suppose that we have the
   following parallel nesting (variable UIDs showed for clarity):

	iD.1562 = 0;
	#omp parallel shared(iD.1562)		-> outer parallel
	  iD.1562 = iD.1562 + 1;

	  #omp parallel shared (iD.1562)	-> inner parallel
	    iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

	outer parallel		.omp_data_s.1.i -> iD.1562
	inner parallel		.omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it.  So, in
   this case we would have:

	iD.1562 = 0;
	.omp_data_o.1.i = iD.1562;
	#omp parallel shared(iD.1562)		-> outer parallel
	  .omp_data_i.1 = &.omp_data_o.1
	  .omp_data_i.1->i = .omp_data_i.1->i + 1;

	  .omp_data_o.2.i = iD.1562;		-> **
	  #omp parallel shared(iD.1562)		-> inner parallel
	    .omp_data_i.2 = &.omp_data_o.2
	    .omp_data_i.2->i = .omp_data_i.2->i - 1;

   ** This is a problem.  The symbol iD.1562 cannot be referenced
   inside the body of the outer parallel region.  But since we are
   emitting this copy operation while expanding the inner parallel
   directive, we need to access the CTX structure of the outer
   parallel directive to get the correct mapping:

	  .omp_data_o.2.i = .omp_data_i.1->i

   Since there may be other workshare or parallel directives enclosing
   the parallel directive, it may be necessary to walk up the context
   parent chain.  This is not a problem in general because nested
   parallelism happens only rarely.  */

static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  gcc_assert (ctx->is_nested);

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  /* A mapping must exist somewhere, unless DECL is a global.  */
  gcc_assert (t || is_global_var (decl));

  return t ? t : decl;
}

/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts.  */

static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t = NULL;
  omp_context *up;

  if (ctx->is_nested)
    for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
      t = maybe_lookup_decl (decl, up);

  return t ? t : decl;
}

/* Construct the initialization value for reduction CLAUSE.
*/

tree
omp_reduction_init (tree clause, tree type)
{
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      /* Identity element is 0 for these operators.  */
      return fold_convert (type, integer_zero_node);

    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      /* Identity element is 1.  */
      return fold_convert (type, integer_one_node);

    case BIT_AND_EXPR:
      /* Identity element is all-ones.  */
      return fold_convert (type, integer_minus_one_node);

    case MAX_EXPR:
      /* Identity is the most negative representable value: -inf when
	 the mode honors infinities, otherwise the most negative
	 finite value (or TYPE_MIN_VALUE for integral types).  */
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max, min;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    {
	      real_inf (&max);
	      real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
	    }
	  else
	    real_maxval (&min, 1, TYPE_MODE (type));
	  return build_real (type, min);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MIN_VALUE (type);
	}

    case MIN_EXPR:
      /* Symmetric to MAX_EXPR: +inf or the largest finite value.  */
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    real_inf (&max);
	  else
	    real_maxval (&max, 0, TYPE_MODE (type));
	  return build_real (type, max);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MAX_VALUE (type);
	}

    default:
      gcc_unreachable ();
    }
}

/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and initializers for REFERENCE_TYPE
   private variables.  Initialization statements go in ILIST, while calls
   to destructors go in DLIST.  */

static void
lower_rec_input_clauses (tree clauses, tree *ilist, tree *dlist,
			 omp_context *ctx)
{
  tree_stmt_iterator diter;
  tree c, dtor, copyin_seq, x, args, ptr;
  bool copyin_by_ref = false;
  bool lastprivate_firstprivate = false;
  int pass;

  *dlist = alloc_stmt_list ();
  diter = tsi_start (*dlist);
  copyin_seq = NULL;

  /* Do all the fixed sized types in the first pass, and the variable sized
     types in the second pass.  This makes sure that the scalar arguments to
     the variable sized types are processed before we use them in the
     variable sized operations.  */
  for (pass = 0; pass < 2; ++pass)
    {
      for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
	{
	  enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
	  tree var, new_var;
	  bool by_ref;

	  switch (c_kind)
	    {
	    case OMP_CLAUSE_PRIVATE:
	      if (OMP_CLAUSE_PRIVATE_DEBUG (c))
		continue;
	      break;
	    case OMP_CLAUSE_SHARED:
	      /* A shared variable with no mapping in this context must
		 be a global accessed directly; nothing to do.  */
	      if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
		{
		  gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
		  continue;
		}
	      /* FALLTHRU */
	    case OMP_CLAUSE_FIRSTPRIVATE:
	    case OMP_CLAUSE_COPYIN:
	    case OMP_CLAUSE_REDUCTION:
	      break;
	    case OMP_CLAUSE_LASTPRIVATE:
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		{
		  lastprivate_firstprivate = true;
		  if (pass != 0)
		    continue;
		}
	      break;
	    default:
	      continue;
	    }

	  new_var = var = OMP_CLAUSE_DECL (c);
	  if (c_kind != OMP_CLAUSE_COPYIN)
	    new_var = lookup_decl (var, ctx);

	  if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
	    {
	      if (pass != 0)
		continue;
	    }
	  else if (is_variable_sized (var))
	    {
	      /* For variable sized types, we need to allocate the
		 actual storage here.  Call alloca and store the
		 result in the pointer decl that we created elsewhere.  */
	      if (pass == 0)
		continue;

	      ptr = DECL_VALUE_EXPR (new_var);
	      gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
	      ptr = TREE_OPERAND (ptr, 0);
	      gcc_assert (DECL_P (ptr));

	      x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
	      args = tree_cons (NULL, x, NULL);
	      x = built_in_decls[BUILT_IN_ALLOCA];
	      x = build_function_call_expr (x, args);
	      x = fold_convert (TREE_TYPE (ptr), x);
	      x = build2 (MODIFY_EXPR, void_type_node, ptr, x);
	      gimplify_and_add (x, ilist);
	    }
	  else if (is_reference (var))
	    {
	      /* For references that are being privatized for Fortran,
		 allocate new backing storage for the new pointer
		 variable.  This allows us to avoid changing all the
		 code that expects a pointer to something that expects
		 a direct variable.  Note that this doesn't apply to
		 C++, since reference types are disallowed in data
		 sharing clauses there, except for NRV optimized
		 return values.  */
	      if (pass == 0)
		continue;

	      x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
	      if (TREE_CONSTANT (x))
		{
		  const char *name = NULL;
		  if (DECL_NAME (var))
		    name = IDENTIFIER_POINTER (DECL_NAME (new_var));

		  x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
					  name);
		  gimple_add_tmp_var (x);
		  x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var));
		}
	      else
		{
		  args = tree_cons (NULL, x, NULL);
		  x = built_in_decls[BUILT_IN_ALLOCA];
		  x = build_function_call_expr (x, args);
		  x = fold_convert (TREE_TYPE (new_var), x);
		}

	      x = build2 (MODIFY_EXPR, void_type_node, new_var, x);
	      gimplify_and_add (x, ilist);

	      new_var = build_fold_indirect_ref (new_var);
	    }
	  else if (c_kind == OMP_CLAUSE_REDUCTION
		   && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    {
	      if (pass == 0)
		continue;
	    }
	  else if (pass != 0)
	    continue;

	  switch (OMP_CLAUSE_CODE (c))
	    {
	    case OMP_CLAUSE_SHARED:
	      /* Shared global vars are just accessed directly.  */
	      if (is_global_var (new_var))
		break;
	      /* Set up the DECL_VALUE_EXPR for shared variables now.  This
		 needs to be delayed until after fixup_child_record_type so
		 that we get the correct type during the dereference.  */
	      by_ref = use_pointer_for_field (var, true);
	      x = build_receiver_ref (var, by_ref, ctx);
	      SET_DECL_VALUE_EXPR (new_var, x);
	      DECL_HAS_VALUE_EXPR_P (new_var) = 1;

	      /* ??? If VAR is not passed by reference, and the variable
		 hasn't been initialized yet, then we'll get a warning for
		 the store into the omp_data_s structure.  Ideally, we'd be
		 able to notice this and not store anything at all, but
		 we're generating code too early.  Suppress the warning.  */
	      if (!by_ref)
		TREE_NO_WARNING (var) = 1;
	      break;

	    case OMP_CLAUSE_LASTPRIVATE:
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		break;
	      /* FALLTHRU */

	    case OMP_CLAUSE_PRIVATE:
	      x = lang_hooks.decls.omp_clause_default_ctor (c, new_var);
	      if (x)
		gimplify_and_add (x, ilist);
	      /* FALLTHRU */

	    do_dtor:
	      x = lang_hooks.decls.omp_clause_dtor (c, new_var);
	      if (x)
		{
		  dtor = x;
		  gimplify_stmt (&dtor);
		  tsi_link_before (&diter, dtor, TSI_SAME_STMT);
		}
	      break;

	    case OMP_CLAUSE_FIRSTPRIVATE:
	      x = build_outer_var_ref (var, ctx);
	      x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
	      gimplify_and_add (x, ilist);
	      goto do_dtor;
	      break;

	    case OMP_CLAUSE_COPYIN:
	      by_ref = use_pointer_for_field (var, false);
	      x = build_receiver_ref (var, by_ref, ctx);
	      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
	      append_to_statement_list (x, &copyin_seq);
	      copyin_by_ref |= by_ref;
	      break;

	    case OMP_CLAUSE_REDUCTION:
	      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
		{
		  gimplify_and_add (OMP_CLAUSE_REDUCTION_INIT (c), ilist);
		  OMP_CLAUSE_REDUCTION_INIT (c) = NULL;
		}
	      else
		{
		  x = omp_reduction_init (c, TREE_TYPE (new_var));
		  gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
		  x = build2 (MODIFY_EXPR, void_type_node, new_var, x);
		  gimplify_and_add (x, ilist);
		}
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}
    }

  /* The copyin sequence is not to be executed by the main thread, since
     that would result in self-copies.  Perhaps not visible to scalars,
     but it certainly is to C++ operator=.  */
  if (copyin_seq)
    {
      x = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
      x = build_function_call_expr (x, NULL);
      x = build2 (NE_EXPR, boolean_type_node, x,
		  build_int_cst (TREE_TYPE (x), 0));
      x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
      gimplify_and_add (x, ilist);
    }

  /* If any copyin variable is passed by reference, we must ensure the
     master thread doesn't modify it before it is copied over in all
     threads.  Similarly for variables in both firstprivate and
     lastprivate clauses we need to ensure the lastprivate copying
     happens after firstprivate copying in all threads.  */
  if (copyin_by_ref || lastprivate_firstprivate)
    build_omp_barrier (ilist);
}

/* Generate code to implement the LASTPRIVATE clauses.  This is used for
   both parallel and workshare constructs.  PREDICATE may be NULL if it's
   always true.  */

static void
lower_lastprivate_clauses (tree clauses, tree predicate, tree *stmt_list,
			   omp_context *ctx)
{
  tree sub_list, x, c;

  /* Early exit if there are no lastprivate clauses.  */
  clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
  if (clauses == NULL)
    {
      /* If this was a workshare clause, see if it had been combined
	 with its parallel.  In that case, look for the clauses on the
	 parallel statement itself.  */
      if (is_parallel_ctx (ctx))
	return;

      ctx = ctx->outer;
      if (ctx == NULL || !is_parallel_ctx (ctx))
	return;

      clauses = find_omp_clause (OMP_PARALLEL_CLAUSES (ctx->stmt),
				 OMP_CLAUSE_LASTPRIVATE);
      if (clauses == NULL)
	return;
    }

  sub_list = alloc_stmt_list ();

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, new_var;

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LASTPRIVATE)
	continue;

      var = OMP_CLAUSE_DECL (c);
      new_var = lookup_decl (var, ctx);

      x = build_outer_var_ref (var, ctx);
      if (is_reference (var))
	new_var = build_fold_indirect_ref (new_var);
      x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
      append_to_statement_list (x, &sub_list);
    }

  if (predicate)
    x = build3 (COND_EXPR, void_type_node, predicate, sub_list, NULL);
  else
    x = sub_list;

  gimplify_and_add (x, stmt_list);
}

/* Generate code to implement the REDUCTION clauses.  */

static void
lower_reduction_clauses (tree clauses, tree *stmt_list, omp_context *ctx)
{
  tree sub_list = NULL, x, c;
  int count = 0;

  /* First see if there is exactly one reduction clause.  Use OMP_ATOMIC
     update in that case, otherwise use a lock.  */
  for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
      {
	if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	  {
	    /* Never use OMP_ATOMIC for array reductions.  */
	    count = -1;
	    break;
	  }
	count++;
      }

  if (count == 0)
    return;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, new_var;
      enum tree_code code;

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
	continue;

      var = OMP_CLAUSE_DECL (c);
      new_var = lookup_decl (var, ctx);
      if (is_reference (var))
	new_var = build_fold_indirect_ref (new_var);
      ref = build_outer_var_ref (var, ctx);
      code = OMP_CLAUSE_REDUCTION_CODE (c);

      /* reduction(-:var) sums up the partial results, so it acts
	 identically to reduction(+:var).  */
      if (code == MINUS_EXPR)
	code = PLUS_EXPR;

      if (count == 1)
	{
	  /* Single reduction: emit one atomic update.  */
	  tree addr = build_fold_addr_expr (ref);

	  addr = save_expr (addr);
	  ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
	  x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
	  x = build2 (OMP_ATOMIC, void_type_node, addr, x);
	  gimplify_and_add (x, stmt_list);
	  return;
	}

      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);

	  if (is_reference (var))
	    ref = build_fold_addr_expr (ref);
	  SET_DECL_VALUE_EXPR (placeholder, ref);
	  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
	  gimplify_and_add (OMP_CLAUSE_REDUCTION_MERGE (c), &sub_list);
	  OMP_CLAUSE_REDUCTION_MERGE (c) = NULL;
	  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
	}
      else
	{
	  x = build2 (code, TREE_TYPE (ref), ref, new_var);
	  ref = build_outer_var_ref (var, ctx);
	  x = build2 (MODIFY_EXPR, void_type_node, ref, x);
	  append_to_statement_list (x, &sub_list);
	}
    }

  /* Multiple reductions: wrap the merges in a global atomic lock.  */
  x = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
  x = build_function_call_expr (x, NULL);
  gimplify_and_add (x, stmt_list);

  gimplify_and_add (sub_list, stmt_list);

  x = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
  x = build_function_call_expr (x, NULL);
  gimplify_and_add (x, stmt_list);
}

/* Generate code to implement the COPYPRIVATE clauses.
*/

static void
lower_copyprivate_clauses (tree clauses, tree *slist, tree *rlist,
			   omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, x;
      bool by_ref;

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
	continue;

      var = OMP_CLAUSE_DECL (c);
      by_ref = use_pointer_for_field (var, false);

      /* Sender side: store the value (or its address) into the
	 copyprivate record.  */
      ref = build_sender_ref (var, ctx);
      x = (ctx->is_nested) ? lookup_decl_in_outer_ctx (var, ctx) : var;
      x = by_ref ? build_fold_addr_expr (x) : x;
      x = build2 (MODIFY_EXPR, void_type_node, ref, x);
      gimplify_and_add (x, slist);

      /* Receiver side: copy the broadcast value back out.  */
      ref = build_receiver_ref (var, by_ref, ctx);
      if (is_reference (var))
	{
	  ref = build_fold_indirect_ref (ref);
	  var = build_fold_indirect_ref (var);
	}
      x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
      gimplify_and_add (x, rlist);
    }
}

/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN,
   LASTPRIVATE, and REDUCTION from the sender (aka parent) side.  */

static void
lower_send_clauses (tree clauses, tree *ilist, tree *olist, omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree val, ref, x, var;
      bool by_ref, do_in = false, do_out = false;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_LASTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  break;
	default:
	  continue;
	}

      var = val = OMP_CLAUSE_DECL (c);
      if (ctx->is_nested)
	var = lookup_decl_in_outer_ctx (val, ctx);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
	  && is_global_var (var))
	continue;
      if (is_variable_sized (val))
	continue;
      by_ref = use_pointer_for_field (val, false);

      /* Decide whether the value flows into the region, out of it,
	 or both.  */
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  do_in = true;
	  break;

	case OMP_CLAUSE_LASTPRIVATE:
	  if (by_ref || is_reference (val))
	    {
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		continue;
	      do_in = true;
	    }
	  else
	    do_out = true;
	  break;

	case OMP_CLAUSE_REDUCTION:
	  do_in = true;
	  do_out = !(by_ref || is_reference (val));
	  break;

	default:
	  gcc_unreachable ();
	}

      if (do_in)
	{
	  ref = build_sender_ref (val, ctx);
	  x = by_ref ? build_fold_addr_expr (var) : var;
	  x = build2 (MODIFY_EXPR, void_type_node, ref, x);
	  gimplify_and_add (x, ilist);
	}

      if (do_out)
	{
	  ref = build_sender_ref (val, ctx);
	  x = build2 (MODIFY_EXPR, void_type_node, var, ref);
	  gimplify_and_add (x, olist);
	}
    }
}

/* Generate code to implement SHARED from the sender (aka parent) side.
   This is trickier, since OMP_PARALLEL_CLAUSES doesn't list things that
   got automatically shared.  */

static void
lower_send_shared_vars (tree *ilist, tree *olist, omp_context *ctx)
{
  tree var, ovar, nvar, f, x;

  if (ctx->record_type == NULL)
    return;

  /* Walk the fields of the sender record; each field's abstract
     origin is the original shared variable.  */
  for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
    {
      ovar = DECL_ABSTRACT_ORIGIN (f);
      nvar = maybe_lookup_decl (ovar, ctx);
      if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
	continue;

      var = ovar;

      /* If CTX is a nested parallel directive.  Find the immediately
	 enclosing parallel or workshare construct that contains a
	 mapping for OVAR.  */
      if (ctx->is_nested)
	var = lookup_decl_in_outer_ctx (ovar, ctx);

      if (use_pointer_for_field (ovar, true))
	{
	  /* Pass the address in; no copy-out is needed.  */
	  x = build_sender_ref (ovar, ctx);
	  var = build_fold_addr_expr (var);
	  x = build2 (MODIFY_EXPR, void_type_node, x, var);
	  gimplify_and_add (x, ilist);
	}
      else
	{
	  /* Pass by value: copy in before the region and copy out
	     after it.  */
	  x = build_sender_ref (ovar, ctx);
	  x = build2 (MODIFY_EXPR, void_type_node, x, var);
	  gimplify_and_add (x, ilist);

	  x = build_sender_ref (ovar, ctx);
	  x = build2 (MODIFY_EXPR, void_type_node, var, x);
	  gimplify_and_add (x, olist);
	}
    }
}

/* Build the function calls to GOMP_parallel_start etc to actually
   generate the parallel operation.  REGION is the parallel region
   being expanded.  BB is the block where to insert the code.  WS_ARGS
   will be set if this is a call to a combined parallel+workshare
   construct, it contains the list of additional arguments needed by
   the workshare construct.
*/

static void
expand_parallel_call (struct omp_region *region, basic_block bb,
		      tree entry_stmt, tree ws_args)
{
  tree t, args, val, cond, c, list, clauses;
  block_stmt_iterator si;
  int start_ix;

  clauses = OMP_PARALLEL_CLAUSES (entry_stmt);
  push_gimplify_context ();

  /* Determine what flavor of GOMP_parallel_start we will be
     emitting.  */
  start_ix = BUILT_IN_GOMP_PARALLEL_START;
  if (is_combined_parallel (region))
    {
      switch (region->inner->type)
	{
	case OMP_FOR:
	  /* The loop-start builtins are laid out contiguously by
	     schedule kind, so index off the static variant.  */
	  start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
		     + region->inner->sched_kind;
	  break;
	case OMP_SECTIONS:
	  start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* By default, the value of NUM_THREADS is zero (selected at run time)
     and there is no conditional.  */
  cond = NULL_TREE;
  val = build_int_cst (unsigned_type_node, 0);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = OMP_CLAUSE_IF_EXPR (c);

  c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
  if (c)
    val = OMP_CLAUSE_NUM_THREADS_EXPR (c);

  /* Ensure 'val' is of the correct type.  */
  val = fold_convert (unsigned_type_node, val);

  /* If we found the clause 'if (cond)', build either
     (cond != 0) or (cond ? val : 1u).  */
  if (cond)
    {
      block_stmt_iterator si;

      cond = gimple_boolify (cond);

      if (integer_zerop (val))
	val = build2 (EQ_EXPR, unsigned_type_node, cond,
		      build_int_cst (TREE_TYPE (cond), 0));
      else
	{
	  /* Build an explicit diamond: tmp = cond ? val : 1u.  */
	  basic_block cond_bb, then_bb, else_bb;
	  edge e;
	  tree t, then_lab, else_lab, tmp;

	  tmp = create_tmp_var (TREE_TYPE (val), NULL);
	  e = split_block (bb, NULL);
	  cond_bb = e->src;
	  bb = e->dest;
	  remove_edge (e);

	  then_bb = create_empty_bb (cond_bb);
	  else_bb = create_empty_bb (then_bb);
	  then_lab = create_artificial_label ();
	  else_lab = create_artificial_label ();

	  t = build3 (COND_EXPR, void_type_node, cond,
		      build_and_jump (&then_lab),
		      build_and_jump (&else_lab));

	  si = bsi_start (cond_bb);
	  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);

	  si = bsi_start (then_bb);
	  t = build1 (LABEL_EXPR, void_type_node, then_lab);
	  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
	  t = build2 (MODIFY_EXPR, void_type_node, tmp, val);
	  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);

	  si = bsi_start (else_bb);
	  t = build1 (LABEL_EXPR, void_type_node, else_lab);
	  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
	  t = build2 (MODIFY_EXPR, void_type_node, tmp,
		      build_int_cst (unsigned_type_node, 1));
	  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);

	  make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
	  make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
	  make_edge (then_bb, bb, EDGE_FALLTHRU);
	  make_edge (else_bb, bb, EDGE_FALLTHRU);

	  val = tmp;
	}

      list = NULL_TREE;
      val = get_formal_tmp_var (val, &list);
      si = bsi_start (bb);
      bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
    }

  /* Emit GOMP_parallel_start (child_fn, &data, num_threads).  Note
     that tree_cons builds the argument list back to front.  */
  list = NULL_TREE;
  args = tree_cons (NULL, val, NULL);
  t = OMP_PARALLEL_DATA_ARG (entry_stmt);
  if (t == NULL)
    t = null_pointer_node;
  else
    t = build_fold_addr_expr (t);
  args = tree_cons (NULL, t, args);
  t = build_fold_addr_expr (OMP_PARALLEL_FN (entry_stmt));
  args = tree_cons (NULL, t, args);

  if (ws_args)
    args = chainon (args, ws_args);

  t = built_in_decls[start_ix];
  t = build_function_call_expr (t, args);
  gimplify_and_add (t, &list);

  /* The master thread runs the child function directly.  */
  t = OMP_PARALLEL_DATA_ARG (entry_stmt);
  if (t == NULL)
    t = null_pointer_node;
  else
    t = build_fold_addr_expr (t);
  args = tree_cons (NULL, t, NULL);
  t = build_function_call_expr (OMP_PARALLEL_FN (entry_stmt), args);
  gimplify_and_add (t, &list);

  t = built_in_decls[BUILT_IN_GOMP_PARALLEL_END];
  t = build_function_call_expr (t, NULL);
  gimplify_and_add (t, &list);

  si = bsi_last (bb);
  bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);

  pop_gimplify_context (NULL_TREE);
}

/* If exceptions are enabled, wrap *STMT_P in a MUST_NOT_THROW catch
   handler.  This prevents programs from violating the structured
   block semantics with throws.  */

static void
maybe_catch_exception (tree *stmt_p)
{
  tree f, t;

  if (!flag_exceptions)
    return;

  if (lang_protect_cleanup_actions)
    t = lang_protect_cleanup_actions ();
  else
    {
      t = built_in_decls[BUILT_IN_TRAP];
      t = build_function_call_expr (t, NULL);
    }
  f = build2 (EH_FILTER_EXPR, void_type_node, NULL, NULL);
  EH_FILTER_MUST_NOT_THROW (f) = 1;
  gimplify_and_add (t, &EH_FILTER_FAILURE (f));

  t = build2 (TRY_CATCH_EXPR, void_type_node, *stmt_p, NULL);
  append_to_statement_list (f, &TREE_OPERAND (t, 1));

  *stmt_p = NULL;
  append_to_statement_list (t, stmt_p);
}

/* Chain all the DECLs in LIST by their TREE_CHAIN fields.  */

static tree
list2chain (tree list)
{
  tree t;

  for (t = list; t; t = TREE_CHAIN (t))
    {
      tree var = TREE_VALUE (t);
      if (TREE_CHAIN (t))
	TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
      else
	TREE_CHAIN (var) = NULL_TREE;
    }

  return list ? TREE_VALUE (list) : NULL_TREE;
}

/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the OMP_PARALLEL that
   left a barrier at the end of the OMP_PARALLEL region can now be
   removed.
*/

static void
remove_exit_barrier (struct omp_region *region)
{
  block_stmt_iterator si;
  basic_block exit_bb;
  edge_iterator ei;
  edge e;
  tree t;

  exit_bb = region->exit;

  /* If the parallel region doesn't return, we don't have REGION->EXIT
     block at all.  */
  if (! exit_bb)
    return;

  /* The last insn in the block will be the parallel's OMP_RETURN.  The
     workshare's OMP_RETURN will be in a preceding block.  The kinds of
     statements that can appear in between are extremely limited -- no
     memory operations at all.  Here, we allow nothing at all, so the
     only thing we allow to precede this OMP_RETURN is a label.  */
  si = bsi_last (exit_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
  bsi_prev (&si);
  if (!bsi_end_p (si) && TREE_CODE (bsi_stmt (si)) != LABEL_EXPR)
    return;

  /* Mark every workshare OMP_RETURN feeding the exit as nowait --
     the parallel's implicit barrier subsumes them.  */
  FOR_EACH_EDGE (e, ei, exit_bb->preds)
    {
      si = bsi_last (e->src);
      if (bsi_end_p (si))
	continue;
      t = bsi_stmt (si);
      if (TREE_CODE (t) == OMP_RETURN)
	OMP_RETURN_NOWAIT (t) = 1;
    }
}

/* Recursively apply remove_exit_barrier to REGION (when it is an
   OMP_PARALLEL region) and to every region nested within it.  */

static void
remove_exit_barriers (struct omp_region *region)
{
  if (region->type == OMP_PARALLEL)
    remove_exit_barrier (region);

  if (region->inner)
    {
      region = region->inner;
      remove_exit_barriers (region);
      while (region->next)
	{
	  region = region->next;
	  remove_exit_barriers (region);
	}
    }
}

/* Expand the OpenMP parallel directive starting at REGION.
*/

static void
expand_omp_parallel (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun, *saved_cfun;
  tree child_fn, block, t, ws_args;
  block_stmt_iterator si;
  tree entry_stmt;
  edge e;
  bool do_cleanup_cfg = false;

  entry_stmt = last_stmt (region->entry);
  child_fn = OMP_PARALLEL_FN (entry_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  saved_cfun = cfun;

  entry_bb = region->entry;
  exit_bb = region->exit;

  if (is_combined_parallel (region))
    ws_args = region->ws_args;
  else
    ws_args = NULL_TREE;

  if (child_cfun->cfg)
    {
      /* Due to inlining, it may happen that we have already outlined
	 the region, in which case all we need to do is make the
	 sub-graph unreachable and emit the parallel call.  */
      edge entry_succ_e, exit_succ_e;
      block_stmt_iterator si;

      entry_succ_e = single_succ_edge (entry_bb);

      si = bsi_last (entry_bb);
      gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_PARALLEL);
      bsi_remove (&si, true);

      new_bb = entry_bb;
      remove_edge (entry_succ_e);
      if (exit_bb)
	{
	  exit_succ_e = single_succ_edge (exit_bb);
	  make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
	}
      do_cleanup_cfg = true;
    }
  else
    {
      /* If the parallel region needs data sent from the parent
	 function, then the very first statement (except possible
	 tree profile counter updates) of the parallel body
	 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
	 &.OMP_DATA_O is passed as an argument to the child function,
	 we need to replace it with the argument as seen by the child
	 function.

	 In most cases, this will end up being the identity assignment
	 .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
	 a function call that has been inlined, the original PARM_DECL
	 .OMP_DATA_I may have been converted into a different local
	 variable.  In which case, we need to keep the assignment.  */
      if (OMP_PARALLEL_DATA_ARG (entry_stmt))
	{
	  basic_block entry_succ_bb = single_succ (entry_bb);
	  block_stmt_iterator si;

	  for (si = bsi_start (entry_succ_bb); ; bsi_next (&si))
	    {
	      tree stmt, arg;

	      gcc_assert (!bsi_end_p (si));
	      stmt = bsi_stmt (si);
	      if (TREE_CODE (stmt) != MODIFY_EXPR)
		continue;

	      arg = TREE_OPERAND (stmt, 1);
	      STRIP_NOPS (arg);
	      if (TREE_CODE (arg) == ADDR_EXPR
		  && TREE_OPERAND (arg, 0)
		     == OMP_PARALLEL_DATA_ARG (entry_stmt))
		{
		  if (TREE_OPERAND (stmt, 0) == DECL_ARGUMENTS (child_fn))
		    bsi_remove (&si, true);
		  else
		    TREE_OPERAND (stmt, 1) = DECL_ARGUMENTS (child_fn);
		  break;
		}
	    }
	}

      /* Declare local variables needed in CHILD_CFUN.  */
      block = DECL_INITIAL (child_fn);
      BLOCK_VARS (block) = list2chain (child_cfun->unexpanded_var_list);
      DECL_SAVED_TREE (child_fn) = single_succ (entry_bb)->stmt_list;

      /* Reset DECL_CONTEXT on locals and function arguments.  */
      for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
	DECL_CONTEXT (t) = child_fn;
      for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
	DECL_CONTEXT (t) = child_fn;

      /* Split ENTRY_BB at OMP_PARALLEL so that it can be moved to the
	 child function.  */
      si = bsi_last (entry_bb);
      t = bsi_stmt (si);
      gcc_assert (t && TREE_CODE (t) == OMP_PARALLEL);
      bsi_remove (&si, true);
      e = split_block (entry_bb, t);
      entry_bb = e->dest;
      single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

      /* Move the parallel region into CHILD_CFUN.  We need to reset
	 dominance information because the expansion of the inner
	 regions has invalidated it.  */
      free_dominance_info (CDI_DOMINATORS);
      new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb);
      if (exit_bb)
	single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
      cgraph_add_new_function (child_fn);

      /* Convert OMP_RETURN into a RETURN_EXPR.  */
      if (exit_bb)
	{
	  si = bsi_last (exit_bb);
	  gcc_assert (!bsi_end_p (si)
		      && TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
	  t = build1 (RETURN_EXPR, void_type_node, NULL);
	  bsi_insert_after (&si, t, BSI_SAME_STMT);
	  bsi_remove (&si, true);
	}
    }

  /* Emit a library call to launch the children threads.  */
  expand_parallel_call (region, new_bb, entry_stmt, ws_args);

  if (do_cleanup_cfg)
    {
      /* Clean up the unreachable sub-graph we created above.  */
      free_dominance_info (CDI_DOMINATORS);
      free_dominance_info (CDI_POST_DOMINATORS);
      cleanup_tree_cfg ();
    }
}

/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with any schedule.  Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	iend = iend0;
    L1:
	BODY;
	V += STEP;
	if (V cond iend) goto L1; else goto L2;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

    If this is a combined omp parallel loop, instead of the call to
    GOMP_loop_foo_start, we emit 'goto L3'.
*/

static void
expand_omp_for_generic (struct omp_region *region,
			struct omp_for_data *fd,
			enum built_in_function start_fn,
			enum built_in_function next_fn)
{
  tree l0, l1, l2 = NULL, l3 = NULL;
  tree type, istart0, iend0, iend;
  tree t, args, list;
  basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb;
  basic_block l2_bb = NULL, l3_bb = NULL;
  block_stmt_iterator si;
  bool in_combined_parallel = is_combined_parallel (region);

  type = TREE_TYPE (fd->v);

  /* istart0/iend0 are passed by address to the GOMP_loop_* calls.  */
  istart0 = create_tmp_var (long_integer_type_node, ".istart0");
  iend0 = create_tmp_var (long_integer_type_node, ".iend0");
  iend = create_tmp_var (type, NULL);
  TREE_ADDRESSABLE (istart0) = 1;
  TREE_ADDRESSABLE (iend0) = 1;

  /* Either both CONT and EXIT exist, or neither does (body never
     returns).  */
  gcc_assert ((region->cont != NULL) ^ (region->exit == NULL));

  entry_bb = region->entry;
  l0_bb = create_empty_bb (entry_bb);
  l1_bb = single_succ (entry_bb);

  l0 = tree_block_label (l0_bb);
  l1 = tree_block_label (l1_bb);

  cont_bb = region->cont;
  exit_bb = region->exit;
  if (cont_bb)
    {
      l2_bb = create_empty_bb (cont_bb);
      l3_bb = single_succ (cont_bb);

      l2 = tree_block_label (l2_bb);
      l3 = tree_block_label (l3_bb);
    }

  si = bsi_last (entry_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
  if (!in_combined_parallel)
    {
      /* If this is not a combined parallel loop, emit a call to
	 GOMP_loop_foo_start in ENTRY_BB.  */
      list = alloc_stmt_list ();
      t = build_fold_addr_expr (iend0);
      args = tree_cons (NULL, t, NULL);
      t = build_fold_addr_expr (istart0);
      args = tree_cons (NULL, t, args);
      if (fd->chunk_size)
	{
	  t = fold_convert (long_integer_type_node, fd->chunk_size);
	  args = tree_cons (NULL, t, args);
	}
      t = fold_convert (long_integer_type_node, fd->step);
      args = tree_cons (NULL, t, args);
      t = fold_convert (long_integer_type_node, fd->n2);
      args = tree_cons (NULL, t, args);
      t = fold_convert (long_integer_type_node, fd->n1);
      args = tree_cons (NULL, t, args);
      t = build_function_call_expr (built_in_decls[start_fn], args);
      t = get_formal_tmp_var (t, &list);
      if (cont_bb)
	{
	  t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l0),
		      build_and_jump (&l3));
	  append_to_statement_list (t, &list);
	}
      bsi_insert_after (&si, list, BSI_SAME_STMT);
    }
  bsi_remove (&si, true);

  /* Iteration setup for sequential loop goes in L0_BB.  */
  list = alloc_stmt_list ();
  t = fold_convert (type, istart0);
  t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
  gimplify_and_add (t, &list);

  t = fold_convert (type, iend0);
  t = build2 (MODIFY_EXPR, void_type_node, iend, t);
  gimplify_and_add (t, &list);

  si = bsi_start (l0_bb);
  bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);

  /* Handle the rare case where BODY doesn't ever return.  */
  if (cont_bb == NULL)
    {
      remove_edge (single_succ_edge (entry_bb));
      make_edge (entry_bb, l0_bb, EDGE_FALLTHRU);
      make_edge (l0_bb, l1_bb, EDGE_FALLTHRU);
      return;
    }

  /* Code to control the increment and predicate for the sequential
     loop goes in the first half of EXIT_BB (we split EXIT_BB so
     that we can inherit all the edges going out of the loop
     body).  */
  list = alloc_stmt_list ();

  t = build2 (PLUS_EXPR, type, fd->v, fd->step);
  t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
  gimplify_and_add (t, &list);

  t = build2 (fd->cond_code, boolean_type_node, fd->v, iend);
  t = get_formal_tmp_var (t, &list);
  t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l1),
	      build_and_jump (&l2));
  append_to_statement_list (t, &list);

  si = bsi_last (cont_bb);
  bsi_insert_after (&si, list, BSI_SAME_STMT);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
  bsi_remove (&si, true);

  /* Emit code to get the next parallel iteration in L2_BB.  */
  list = alloc_stmt_list ();

  t = build_fold_addr_expr (iend0);
  args = tree_cons (NULL, t, NULL);
  t = build_fold_addr_expr (istart0);
  args = tree_cons (NULL, t, args);
  t = build_function_call_expr (built_in_decls[next_fn], args);
  t = get_formal_tmp_var (t, &list);
  t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l0),
	      build_and_jump (&l3));
  append_to_statement_list (t, &list);

  si = bsi_start (l2_bb);
  bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);

  /* Add the loop cleanup function.  */
  si = bsi_last (exit_bb);
  if (OMP_RETURN_NOWAIT (bsi_stmt (si)))
    t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
  else
    t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
  t = build_function_call_expr (t, NULL);
  bsi_insert_after (&si, t, BSI_SAME_STMT);
  bsi_remove (&si, true);

  /* Connect the new blocks.  */
  remove_edge (single_succ_edge (entry_bb));
  if (in_combined_parallel)
    make_edge (entry_bb, l2_bb, EDGE_FALLTHRU);
  else
    {
      make_edge (entry_bb, l0_bb, EDGE_TRUE_VALUE);
      make_edge (entry_bb, l3_bb, EDGE_FALSE_VALUE);
    }

  make_edge (l0_bb, l1_bb, EDGE_FALLTHRU);

  remove_edge (single_succ_edge (cont_bb));
  make_edge (cont_bb, l1_bb, EDGE_TRUE_VALUE);
  make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);

  make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
  make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
}

/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and no specified chunk size.
   Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	n = (adj + N2 - N1) / STEP;
	q = n / nthreads;
	q += (q * nthreads != n);
	s0 = q * threadid;
	e0 = min(s0 + q, n);
	if (s0 >= e0) goto L2; else goto L0;
    L0:
	V = s0 * STEP + N1;
	e = e0 * STEP + N1;
    L1:
	BODY;
	V += STEP;
	if (V cond e) goto L1;
    L2:
*/

/* REGION describes the already-built CFG region for the loop; FD holds
   the extracted loop bounds, step, direction and schedule information.
   The region's entry block ends in the OMP_FOR statement and its cont
   block ends in the OMP_CONTINUE marker; both markers are replaced
   here and the surrounding CFG edges are rewired.  */

static void
expand_omp_for_static_nochunk (struct omp_region *region,
			       struct omp_for_data *fd)
{
  tree l0, l1, l2, n, q, s0, e0, e, t, nthreads, threadid;
  tree type, list;
  basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
  basic_block fin_bb;
  block_stmt_iterator si;

  type = TREE_TYPE (fd->v);

  /* Locate the blocks of the region: ENTRY ends in OMP_FOR, CONT ends
     in OMP_CONTINUE, EXIT ends in OMP_RETURN.  SEQ_START_BB is a fresh
     block that will hold the per-thread iteration setup (L0 above).  */
  entry_bb = region->entry;
  seq_start_bb = create_empty_bb (entry_bb);
  body_bb = single_succ (entry_bb);
  cont_bb = region->cont;
  fin_bb = single_succ (cont_bb);
  exit_bb = region->exit;

  l0 = tree_block_label (seq_start_bb);
  l1 = tree_block_label (body_bb);
  l2 = tree_block_label (fin_bb);

  /* Iteration space partitioning goes in ENTRY_BB.  */
  list = alloc_stmt_list ();

  /* nthreads and threadid are fetched at run time from the libgomp
     team state.  */
  t = built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS];
  t = build_function_call_expr (t, NULL);
  t = fold_convert (type, t);
  nthreads = get_formal_tmp_var (t, &list);

  t = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
  t = build_function_call_expr (t, NULL);
  t = fold_convert (type, t);
  threadid = get_formal_tmp_var (t, &list);

  /* Force N1, N2 and STEP into gimple values of the iteration type so
     they can be used freely in the expressions built below.  */
  fd->n1 = fold_convert (type, fd->n1);
  if (!is_gimple_val (fd->n1))
    fd->n1 = get_formal_tmp_var (fd->n1, &list);

  fd->n2 = fold_convert (type, fd->n2);
  if (!is_gimple_val (fd->n2))
    fd->n2 = get_formal_tmp_var (fd->n2, &list);

  fd->step = fold_convert (type, fd->step);
  if (!is_gimple_val (fd->step))
    fd->step = get_formal_tmp_var (fd->step, &list);

  /* n = (adj + N2 - N1) / STEP, where adj = STEP -/+ 1 biases the
     division so the count rounds toward the iteration direction.  */
  t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, type, fd->step, t);
  t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
  t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
  t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
  t = fold_convert (type, t);
  if (is_gimple_val (t))
    n = t;
  else
    n = get_formal_tmp_var (t, &list);

  /* q = n / nthreads, then rounded up by one iff the division had a
     remainder (q * nthreads != n), i.e. q = ceil (n / nthreads).  */
  t = build2 (TRUNC_DIV_EXPR, type, n, nthreads);
  q = get_formal_tmp_var (t, &list);

  t = build2 (MULT_EXPR, type, q, nthreads);
  t = build2 (NE_EXPR, type, t, n);
  t = build2 (PLUS_EXPR, type, q, t);
  q = get_formal_tmp_var (t, &list);

  /* This thread's slice is [s0, e0) in logical iteration numbers.  */
  t = build2 (MULT_EXPR, type, q, threadid);
  s0 = get_formal_tmp_var (t, &list);

  t = build2 (PLUS_EXPR, type, s0, q);
  t = build2 (MIN_EXPR, type, t, n);
  e0 = get_formal_tmp_var (t, &list);

  /* Empty slice: skip straight to the join point L2.  */
  t = build2 (GE_EXPR, boolean_type_node, s0, e0);
  t = build3 (COND_EXPR, void_type_node, t,
	      build_and_jump (&l2), build_and_jump (&l0));
  append_to_statement_list (t, &list);

  /* Replace the OMP_FOR marker in ENTRY_BB with the list just built.  */
  si = bsi_last (entry_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
  bsi_insert_after (&si, list, BSI_SAME_STMT);
  bsi_remove (&si, true);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  list = alloc_stmt_list ();

  /* V = s0 * STEP + N1 maps the logical start back to iterator space;
     e is the corresponding exclusive end value.  */
  t = fold_convert (type, s0);
  t = build2 (MULT_EXPR, type, t, fd->step);
  t = build2 (PLUS_EXPR, type, t, fd->n1);
  t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
  gimplify_and_add (t, &list);

  t = fold_convert (type, e0);
  t = build2 (MULT_EXPR, type, t, fd->step);
  t = build2 (PLUS_EXPR, type, t, fd->n1);
  e = get_formal_tmp_var (t, &list);

  si = bsi_start (seq_start_bb);
  bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop replaces the
     OMP_CONTINUE.  */
  list = alloc_stmt_list ();

  t = build2 (PLUS_EXPR, type, fd->v, fd->step);
  t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
  gimplify_and_add (t, &list);

  /* if (V cond e) goto L1 (loop body); else goto L2 (done).  */
  t = build2 (fd->cond_code, boolean_type_node, fd->v, e);
  t = get_formal_tmp_var (t, &list);
  t = build3 (COND_EXPR, void_type_node, t,
	      build_and_jump (&l1), build_and_jump (&l2));
  append_to_statement_list (t, &list);

  si = bsi_last (cont_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
  bsi_insert_after (&si, list, BSI_SAME_STMT);
  bsi_remove (&si, true);

  /* Replace the OMP_RETURN with a barrier, or nothing.  */
  si = bsi_last (exit_bb);
  if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
    {
      list = alloc_stmt_list ();
      build_omp_barrier (&list);
      bsi_insert_after (&si, list, BSI_SAME_STMT);
    }
  bsi_remove (&si, true);

  /* Connect all the blocks.  The TRUE edge out of ENTRY_BB is the
     "empty slice" shortcut to FIN_BB; the TRUE edge out of CONT_BB
     loops back to the body.  */
  make_edge (seq_start_bb, body_bb, EDGE_FALLTHRU);

  remove_edge (single_succ_edge (entry_bb));
  make_edge (entry_bb, fin_bb, EDGE_TRUE_VALUE);
  make_edge (entry_bb, seq_start_bb, EDGE_FALSE_VALUE);

  make_edge (cont_bb, body_bb, EDGE_TRUE_VALUE);
  find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
}


/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and a specified chunk size.
   Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	n = (adj + N2 - N1) / STEP;
	trip = 0;
    L0:
	s0 = (trip * nthreads + threadid) * CHUNK;
	e0 = min(s0 + CHUNK, n);
	if (s0 < n) goto L1; else goto L4;
    L1:
	V = s0 * STEP + N1;
	e = e0 * STEP + N1;
    L2:
	BODY;
	V += STEP;
	if (V cond e) goto L2; else goto L3;
    L3:
	trip += 1;
	goto L0;
    L4:
*/

/* REGION/FD are as for expand_omp_for_static_nochunk; additionally
   FD->chunk_size is non-NULL and gives the CHUNK above.  Chunks are
   dealt out round-robin: chunk number (trip * nthreads + threadid)
   belongs to this thread on trip count TRIP.  */

static void
expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
{
  tree l0, l1, l2, l3, l4, n, s0, e0, e, t;
  tree trip, nthreads, threadid;
  tree type;
  basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
  basic_block trip_update_bb, cont_bb, fin_bb;
  tree list;
  block_stmt_iterator si;

  type = TREE_TYPE (fd->v);

  /* Fresh blocks: ITER_PART_BB computes the chunk bounds (L0),
     SEQ_START_BB maps them into iterator space (L1), TRIP_UPDATE_BB
     advances the trip count (L3).  */
  entry_bb = region->entry;
  iter_part_bb = create_empty_bb (entry_bb);
  seq_start_bb = create_empty_bb (iter_part_bb);
  body_bb = single_succ (entry_bb);
  cont_bb = region->cont;
  trip_update_bb = create_empty_bb (cont_bb);
  fin_bb = single_succ (cont_bb);
  exit_bb = region->exit;

  l0 = tree_block_label (iter_part_bb);
  l1 = tree_block_label (seq_start_bb);
  l2 = tree_block_label (body_bb);
  l3 = tree_block_label (trip_update_bb);
  l4 = tree_block_label (fin_bb);

  /* Trip and adjustment setup goes in ENTRY_BB.  */
  list = alloc_stmt_list ();

  t = built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS];
  t = build_function_call_expr (t, NULL);
  t = fold_convert (type, t);
  nthreads = get_formal_tmp_var (t, &list);

  t = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
  t = build_function_call_expr (t, NULL);
  t = fold_convert (type, t);
  threadid = get_formal_tmp_var (t, &list);

  /* Force the loop parameters into gimple values of the iteration
     type, including the chunk size.  */
  fd->n1 = fold_convert (type, fd->n1);
  if (!is_gimple_val (fd->n1))
    fd->n1 = get_formal_tmp_var (fd->n1, &list);

  fd->n2 = fold_convert (type, fd->n2);
  if (!is_gimple_val (fd->n2))
    fd->n2 = get_formal_tmp_var (fd->n2, &list);

  fd->step = fold_convert (type, fd->step);
  if (!is_gimple_val (fd->step))
    fd->step = get_formal_tmp_var (fd->step, &list);

  fd->chunk_size = fold_convert (type, fd->chunk_size);
  if (!is_gimple_val (fd->chunk_size))
    fd->chunk_size = get_formal_tmp_var (fd->chunk_size, &list);

  /* n = (adj + N2 - N1) / STEP, the total logical iteration count.  */
  t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, type, fd->step, t);
  t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
  t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
  t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
  t = fold_convert (type, t);
  if (is_gimple_val (t))
    n = t;
  else
    n = get_formal_tmp_var (t, &list);

  /* trip = 0, maintained across the whole chunked loop.  */
  t = build_int_cst (type, 0);
  trip = get_initialized_tmp_var (t, &list, NULL);

  si = bsi_last (entry_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
  bsi_insert_after (&si, list, BSI_SAME_STMT);
  bsi_remove (&si, true);

  /* Iteration space partitioning goes in ITER_PART_BB.  */
  list = alloc_stmt_list ();

  /* s0 = (trip * nthreads + threadid) * CHUNK; e0 = min(s0+CHUNK, n).  */
  t = build2 (MULT_EXPR, type, trip, nthreads);
  t = build2 (PLUS_EXPR, type, t, threadid);
  t = build2 (MULT_EXPR, type, t, fd->chunk_size);
  s0 = get_formal_tmp_var (t, &list);

  t = build2 (PLUS_EXPR, type, s0, fd->chunk_size);
  t = build2 (MIN_EXPR, type, t, n);
  e0 = get_formal_tmp_var (t, &list);

  /* No chunk left for this thread: exit to L4.  */
  t = build2 (LT_EXPR, boolean_type_node, s0, n);
  t = build3 (COND_EXPR, void_type_node, t,
	      build_and_jump (&l1), build_and_jump (&l4));
  append_to_statement_list (t, &list);

  si = bsi_start (iter_part_bb);
  bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  list = alloc_stmt_list ();

  /* V = s0 * STEP + N1; e = e0 * STEP + N1.  */
  t = fold_convert (type, s0);
  t = build2 (MULT_EXPR, type, t, fd->step);
  t = build2 (PLUS_EXPR, type, t, fd->n1);
  t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
  gimplify_and_add (t, &list);

  t = fold_convert (type, e0);
  t = build2 (MULT_EXPR, type, t, fd->step);
  t = build2 (PLUS_EXPR, type, t, fd->n1);
  e = get_formal_tmp_var (t, &list);

  si = bsi_start (seq_start_bb);
  bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop goes in CONT_BB,
     replacing the OMP_CONTINUE.  */
  list = alloc_stmt_list ();

  t = build2 (PLUS_EXPR, type, fd->v, fd->step);
  t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
  gimplify_and_add (t, &list);

  /* if (V cond e) goto L2 (body); else goto L3 (next chunk).  */
  t = build2 (fd->cond_code, boolean_type_node, fd->v, e);
  t = get_formal_tmp_var (t, &list);
  t = build3 (COND_EXPR, void_type_node, t,
	      build_and_jump (&l2), build_and_jump (&l3));
  append_to_statement_list (t, &list);

  si = bsi_last (cont_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
  bsi_insert_after (&si, list, BSI_SAME_STMT);
  bsi_remove (&si, true);

  /* Trip update code goes into TRIP_UPDATE_BB.  */
  list = alloc_stmt_list ();

  t = build_int_cst (type, 1);
  t = build2 (PLUS_EXPR, type, trip, t);
  t = build2 (MODIFY_EXPR, void_type_node, trip, t);
  gimplify_and_add (t, &list);

  si = bsi_start (trip_update_bb);
  bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);

  /* Replace the OMP_RETURN with a barrier, or nothing.  */
  si = bsi_last (exit_bb);
  if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
    {
      list = alloc_stmt_list ();
      build_omp_barrier (&list);
      bsi_insert_after (&si, list, BSI_SAME_STMT);
    }
  bsi_remove (&si, true);

  /* Connect the new blocks.  */
  remove_edge (single_succ_edge (entry_bb));
  make_edge (entry_bb, iter_part_bb, EDGE_FALLTHRU);

  make_edge (iter_part_bb, seq_start_bb, EDGE_TRUE_VALUE);
  make_edge (iter_part_bb, fin_bb, EDGE_FALSE_VALUE);

  make_edge (seq_start_bb, body_bb, EDGE_FALLTHRU);

  remove_edge (single_succ_edge (cont_bb));
  make_edge (cont_bb, body_bb, EDGE_TRUE_VALUE);
  make_edge (cont_bb, trip_update_bb, EDGE_FALSE_VALUE);

  make_edge (trip_update_bb, iter_part_bb, EDGE_FALLTHRU);
}


/* Expand the OpenMP loop defined by REGION.  Dispatches on the
   schedule kind: static schedules without an ordered clause are
   expanded inline; everything else goes through the generic libgomp
   GOMP_loop_*_start/next runtime interface.  */

static void
expand_omp_for (struct omp_region *region)
{
  struct omp_for_data fd;

  push_gimplify_context ();

  extract_omp_for_data (last_stmt (region->entry), &fd);
  region->sched_kind = fd.sched_kind;

  if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
      && !fd.have_ordered
      && region->cont
      && region->exit)
    {
      if (fd.chunk_size == NULL)
	expand_omp_for_static_nochunk (region, &fd);
      else
	expand_omp_for_static_chunk (region, &fd);
    }
  else
    {
      /* Select the matching GOMP_loop_<sched>[_ordered]_{start,next}
	 builtin pair; the *_ordered variants sit 4 entries after the
	 plain ones in the builtins table.  */
      int fn_index = fd.sched_kind + fd.have_ordered * 4;
      int start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
      int next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
      expand_omp_for_generic (region, &fd, start_ix, next_ix);
    }

  pop_gimplify_context (NULL);
}


/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

	v = GOMP_sections_start (n);
    L0:
	switch (v)
	  {
	  case 0:
	    goto L2;
	  case 1:
	    section 1;
	    goto L1;
	  case 2:
	    ...
	  case n:
	    ...
	  default:
	    abort ();
	  }
    L1:
	v = GOMP_sections_next ();
	goto L0;
    L2:
	reduction;

   If this is a combined parallel sections, replace the call to
   GOMP_sections_start with 'goto L1'.  */

static void
expand_omp_sections (struct omp_region *region)
{
  tree label_vec, l0, l1, l2, t, u, v, sections_stmt;
  unsigned i, len;
  basic_block entry_bb, exit_bb, l0_bb, l1_bb, l2_bb, default_bb;
  block_stmt_iterator si;
  struct omp_region *inner;
  edge e;

  entry_bb = region->entry;
  l0_bb = create_empty_bb (entry_bb);
  l0 = tree_block_label (l0_bb);

  /* The region has either both a cont and an exit block or neither
     (the latter when the sections body never returns).  */
  gcc_assert ((region->cont != NULL) ^ (region->exit == NULL));
  l1_bb = region->cont;
  if (l1_bb)
    {
      l2_bb = single_succ (l1_bb);
      default_bb = create_empty_bb (l1_bb->prev_bb);

      l1 = tree_block_label (l1_bb);
    }
  else
    {
      /* No continue block: reuse L2 as the switch default target.  */
      l2_bb = create_empty_bb (l0_bb);
      default_bb = l2_bb;

      l1 = NULL;
    }
  l2 = tree_block_label (l2_bb);

  exit_bb = region->exit;

  /* V receives the section number to execute from the runtime.  */
  v = create_tmp_var (unsigned_type_node, ".section");

  /* We will build a switch() with enough cases for all the
     OMP_SECTION regions, a '0' case to handle the end of more work
     and a default case to abort if something goes wrong.  */
  len = EDGE_COUNT (entry_bb->succs);
  label_vec = make_tree_vec (len + 2);

  /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
     OMP_SECTIONS statement.  */
  si = bsi_last (entry_bb);
  sections_stmt = bsi_stmt (si);
  gcc_assert (TREE_CODE (sections_stmt) == OMP_SECTIONS);
  if (!is_combined_parallel (region))
    {
      /* If we are not inside a combined parallel+sections region,
	 call GOMP_sections_start.  */
      t = build_int_cst (unsigned_type_node, len);
      t = tree_cons (NULL, t, NULL);
      u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
      t = build_function_call_expr (u, t);
      t = build2 (MODIFY_EXPR, void_type_node, v, t);
      bsi_insert_after (&si, t, BSI_SAME_STMT);
    }
  bsi_remove (&si, true);

  /* The switch() statement replacing OMP_SECTIONS goes in L0_BB.  */
  si = bsi_start (l0_bb);

  t = build3 (SWITCH_EXPR, void_type_node, v, NULL, label_vec);
  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);

  /* Case 0 means "no more sections": jump to L2.  */
  t = build3 (CASE_LABEL_EXPR, void_type_node,
	      build_int_cst (unsigned_type_node, 0), NULL, l2);
  TREE_VEC_ELT (label_vec, 0) = t;
  make_edge (l0_bb, l2_bb, 0);

  /* Convert each OMP_SECTION into a CASE_LABEL_EXPR.  */
  for (inner = region->inner, i = 1; inner; inner = inner->next, ++i)
    {
      basic_block s_entry_bb, s_exit_bb;

      s_entry_bb = inner->entry;
      s_exit_bb = inner->exit;

      t = tree_block_label (s_entry_bb);
      u = build_int_cst (unsigned_type_node, i);
      u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
      TREE_VEC_ELT (label_vec, i) = u;

      /* Strip the OMP_SECTION marker from the section's entry.  */
      si = bsi_last (s_entry_bb);
      gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SECTION);
      gcc_assert (i < len || OMP_SECTION_LAST (bsi_stmt (si)));
      bsi_remove (&si, true);

      /* Reroute the section entry to come from the switch in L0_BB.  */
      e = single_pred_edge (s_entry_bb);
      e->flags = 0;
      redirect_edge_pred (e, l0_bb);

      single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;

      /* A section that never returns has no exit block.  */
      if (s_exit_bb == NULL)
	continue;

      si = bsi_last (s_exit_bb);
      gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
      bsi_remove (&si, true);

      single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
    }

  /* Error handling code goes in DEFAULT_BB.  */
  t = tree_block_label (default_bb);
  u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
  TREE_VEC_ELT (label_vec, len + 1) = u;
  make_edge (l0_bb, default_bb, 0);

  /* An out-of-range section number from the runtime traps.  */
  si = bsi_start (default_bb);
  t = built_in_decls[BUILT_IN_TRAP];
  t = build_function_call_expr (t, NULL);
  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);

  /* Code to get the next section goes in L1_BB.  */
  if (l1_bb)
    {
      si = bsi_last (l1_bb);
      gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);

      t = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
      t = build_function_call_expr (t, NULL);
      t = build2 (MODIFY_EXPR, void_type_node, v, t);
      bsi_insert_after (&si, t, BSI_SAME_STMT);
      bsi_remove (&si, true);
    }

  /* Cleanup function replaces OMP_RETURN in EXIT_BB.  */
  if (exit_bb)
    {
      si = bsi_last (exit_bb);
      if (OMP_RETURN_NOWAIT (bsi_stmt (si)))
	t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
      else
	t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
      t = build_function_call_expr (t, NULL);
      bsi_insert_after (&si, t, BSI_SAME_STMT);
      bsi_remove (&si, true);
    }

  /* Connect the new blocks.  */
  if (is_combined_parallel (region))
    {
      /* If this was a combined parallel+sections region, we did not
	 emit a GOMP_sections_start in the entry block, so we just
	 need to jump to L1_BB to get the next section.  */
      make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
    }
  else
    make_edge (entry_bb, l0_bb, EDGE_FALLTHRU);

  if (l1_bb)
    {
      e = single_succ_edge (l1_bb);
      redirect_edge_succ (e, l0_bb);
      e->flags = EDGE_FALLTHRU;
    }
}


/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code, here we simply place the GOMP_barrier call.  */

static void
expand_omp_single (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  block_stmt_iterator si;
  bool need_barrier = false;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = bsi_last (entry_bb);
  /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
     be removed.  We need to ensure that the thread that entered the single
     does not exit before the data is copied out by the other threads.  */
  if (find_omp_clause (OMP_SINGLE_CLAUSES (bsi_stmt (si)),
		       OMP_CLAUSE_COPYPRIVATE))
    need_barrier = true;
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SINGLE);
  bsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  /* Emit the barrier unless nowait was given (and no copyprivate
     forces one anyway), then drop the OMP_RETURN marker.  */
  si = bsi_last (exit_bb);
  if (!OMP_RETURN_NOWAIT (bsi_stmt (si)) || need_barrier)
    {
      tree t = alloc_stmt_list ();
      build_omp_barrier (&t);
      bsi_insert_after (&si, t, BSI_SAME_STMT);
    }
  bsi_remove (&si, true);
  single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}


/* Generic expansion for OpenMP synchronization directives: master,
   ordered and critical.  All we need to do here is remove the entry
   and exit markers for REGION.
*/

static void
expand_omp_synch (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  block_stmt_iterator si;

  entry_bb = region->entry;
  exit_bb = region->exit;

  /* The lowering pass already emitted the runtime calls; only the
     region markers remain to be stripped.  OMP_SINGLE also funnels
     through here when it needed no barrier work.  */
  si = bsi_last (entry_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SINGLE
	      || TREE_CODE (bsi_stmt (si)) == OMP_MASTER
	      || TREE_CODE (bsi_stmt (si)) == OMP_ORDERED
	      || TREE_CODE (bsi_stmt (si)) == OMP_CRITICAL);
  bsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  /* A region whose body never returns has no exit block.  */
  if (exit_bb)
    {
      si = bsi_last (exit_bb);
      gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
      bsi_remove (&si, true);
      single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
    }
}


/* Expand the parallel region tree rooted at REGION.  Expansion
   proceeds in depth-first order.  Innermost regions are expanded
   first.  This way, parallel regions that require a new function to
   be created (e.g., OMP_PARALLEL) can be expanded without having any
   internal dependencies in their body.  */

static void
expand_omp (struct omp_region *region)
{
  /* Walk siblings iteratively, children recursively.  */
  while (region)
    {
      if (region->inner)
	expand_omp (region->inner);

      switch (region->type)
	{
	case OMP_PARALLEL:
	  expand_omp_parallel (region);
	  break;

	case OMP_FOR:
	  expand_omp_for (region);
	  break;

	case OMP_SECTIONS:
	  expand_omp_sections (region);
	  break;

	case OMP_SECTION:
	  /* Individual omp sections are handled together with their
	     parent OMP_SECTIONS region.  */
	  break;

	case OMP_SINGLE:
	  expand_omp_single (region);
	  break;

	case OMP_MASTER:
	case OMP_ORDERED:
	case OMP_CRITICAL:
	  expand_omp_synch (region);
	  break;

	default:
	  gcc_unreachable ();
	}

      region = region->next;
    }
}


/* Helper for build_omp_regions.  Scan the dominator tree starting at
   block BB.  PARENT is the region that contains BB.
*/

static void
build_omp_regions_1 (basic_block bb, struct omp_region *parent)
{
  block_stmt_iterator si;
  tree stmt;
  basic_block son;

  /* An OMP directive, if present, is always the last statement in
     its block.  */
  si = bsi_last (bb);
  if (!bsi_end_p (si) && OMP_DIRECTIVE_P (bsi_stmt (si)))
    {
      struct omp_region *region;
      enum tree_code code;

      stmt = bsi_stmt (si);
      code = TREE_CODE (stmt);

      if (code == OMP_RETURN)
	{
	  /* STMT is the return point out of region PARENT.  Mark it
	     as the exit point and make PARENT the immediately
	     enclosing region.  */
	  gcc_assert (parent);
	  region = parent;
	  region->exit = bb;
	  parent = parent->outer;

	  /* If REGION is a parallel region, determine whether it is
	     a combined parallel+workshare region.  */
	  if (region->type == OMP_PARALLEL)
	    determine_parallel_type (region);
	}
      else if (code == OMP_CONTINUE)
	{
	  /* Loop-back marker: record it on the enclosing region.  */
	  gcc_assert (parent);
	  parent->cont = bb;
	}
      else
	{
	  /* Otherwise, this directive becomes the parent for a new
	     region.  */
	  region = new_omp_region (bb, code, parent);
	  parent = region;
	}
    }

  /* Recurse over the dominator tree so nesting of regions follows
     dominance, with PARENT updated to reflect the region open at BB.  */
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    build_omp_regions_1 (son, parent);
}


/* Scan the CFG and build a tree of OMP regions.  Return the root of
   the OMP region tree.  */

static void
build_omp_regions (void)
{
  gcc_assert (root_omp_region == NULL);
  calculate_dominance_info (CDI_DOMINATORS);
  build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL);
}


/* Main entry point for expanding OMP-GIMPLE into runtime calls.
*/

static unsigned int
execute_expand_omp (void)
{
  build_omp_regions ();

  /* Nothing to do if the function contains no OMP directives.  */
  if (!root_omp_region)
    return 0;

  if (dump_file)
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);

  expand_omp (root_omp_region);

  /* Expansion rewrote the CFG wholesale; dominance info is stale and
     the CFG may contain unreachable blocks.  */
  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);
  cleanup_tree_cfg ();

  free_omp_regions ();

  return 0;
}

/* Gate: run the expansion pass only for -fopenmp compiles that have
   produced no errors so far.  */

static bool
gate_expand_omp (void)
{
  return flag_openmp != 0 && errorcount == 0;
}

struct tree_opt_pass pass_expand_omp = 
{
  "ompexp",				/* name */
  gate_expand_omp,			/* gate */
  execute_expand_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  0,					/* tv_id */
  PROP_gimple_any,			/* properties_required */
  PROP_gimple_lomp,			/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func,			/* todo_flags_finish */
  0					/* letter */
};

/* Routines to lower OpenMP directives into OMP-GIMPLE.  */

/* Lower the OpenMP sections directive in *STMT_P.
*/

static void
lower_omp_sections (tree *stmt_p, omp_context *ctx)
{
  tree new_stmt, stmt, body, bind, block, ilist, olist, new_body;
  tree t, dlist;
  tree_stmt_iterator tsi;
  unsigned i, len;

  stmt = *stmt_p;

  push_gimplify_context ();

  /* ILIST receives the clause input/setup code, DLIST the matching
     destructor/copy-back code.  */
  dlist = NULL;
  ilist = NULL;
  lower_rec_input_clauses (OMP_SECTIONS_CLAUSES (stmt), &ilist, &dlist, ctx);

  /* Count the OMP_SECTION statements in the body.  */
  tsi = tsi_start (OMP_SECTIONS_BODY (stmt));
  for (len = 0; !tsi_end_p (tsi); len++, tsi_next (&tsi))
    continue;

  /* Rebuild the body: each section keeps its OMP_SECTION marker,
     followed by its lowered body and a fresh OMP_RETURN marker.  */
  tsi = tsi_start (OMP_SECTIONS_BODY (stmt));
  body = alloc_stmt_list ();
  for (i = 0; i < len; i++, tsi_next (&tsi))
    {
      omp_context *sctx;
      tree sec_start, sec_end;

      sec_start = tsi_stmt (tsi);
      sctx = maybe_lookup_ctx (sec_start);
      gcc_assert (sctx);

      append_to_statement_list (sec_start, &body);

      lower_omp (&OMP_SECTION_BODY (sec_start), sctx);
      append_to_statement_list (OMP_SECTION_BODY (sec_start), &body);
      OMP_SECTION_BODY (sec_start) = NULL;

      /* Lastprivate copy-out is appended to the final section, which
	 is also flagged so region building can recognize it.  */
      if (i == len - 1)
	{
	  tree l = alloc_stmt_list ();
	  lower_lastprivate_clauses (OMP_SECTIONS_CLAUSES (stmt), NULL,
				     &l, ctx);
	  append_to_statement_list (l, &body);
	  OMP_SECTION_LAST (sec_start) = 1;
	}

      sec_end = make_node (OMP_RETURN);
      append_to_statement_list (sec_end, &body);
    }

  block = make_node (BLOCK);
  bind = build3 (BIND_EXPR, void_type_node, NULL, body, block);

  olist = NULL_TREE;
  lower_reduction_clauses (OMP_SECTIONS_CLAUSES (stmt), &olist, ctx);

  pop_gimplify_context (NULL_TREE);
  record_vars_into (ctx->block_vars, ctx->cb.dst_fn);

  /* Wrap everything in a new BIND_EXPR:
     ilist; OMP_SECTIONS; bind; OMP_CONTINUE; olist; dlist; OMP_RETURN.  */
  new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (new_stmt) = 1;

  new_body = alloc_stmt_list ();
  append_to_statement_list (ilist, &new_body);
  append_to_statement_list (stmt, &new_body);
  append_to_statement_list (bind, &new_body);

  t = make_node (OMP_CONTINUE);
  append_to_statement_list (t, &new_body);

  append_to_statement_list (olist, &new_body);
  append_to_statement_list (dlist, &new_body);

  maybe_catch_exception (&new_body);

  /* Propagate the nowait clause onto the region exit marker.  */
  t = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SECTIONS_CLAUSES (stmt),
					     OMP_CLAUSE_NOWAIT);
  append_to_statement_list (t, &new_body);

  BIND_EXPR_BODY (new_stmt) = new_body;
  OMP_SECTIONS_BODY (stmt) = NULL;

  *stmt_p = new_stmt;
}


/* A subroutine of lower_omp_single.  Expand the simple form of
   an OMP_SINGLE, without a copyprivate clause:

     	if (GOMP_single_start ())
	  BODY;
	[ GOMP_barrier (); ]	-> unless 'nowait' is present.

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.  */

static void
lower_omp_single_simple (tree single_stmt, tree *pre_p)
{
  tree t;

  /* GOMP_single_start returns true in exactly one thread; that thread
     runs BODY.  The barrier, if any, is placed by the caller.  */
  t = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
  t = build_function_call_expr (t, NULL);
  t = build3 (COND_EXPR, void_type_node, t,
	      OMP_SINGLE_BODY (single_stmt), NULL);
  gimplify_and_add (t, pre_p);
}


/* A subroutine of lower_omp_single.  Expand the simple form of
   an OMP_SINGLE, with a copyprivate clause:

	#pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

      {
	if ((copyout_p = GOMP_single_copy_start ()) == NULL)
	  {
	    BODY;
	    copyout.a = a;
	    copyout.b = b;
	    copyout.c = c;
	    GOMP_single_copy_end (&copyout);
	  }
	else
	  {
	    a = copyout_p->a;
	    b = copyout_p->b;
	    c = copyout_p->c;
	  }
	GOMP_barrier ();
      }

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.
*/

static void
lower_omp_single_copy (tree single_stmt, tree *pre_p, omp_context *ctx)
{
  tree ptr_type, t, args, l0, l1, l2, copyin_seq;

  /* CTX->record_type is the struct holding the copyprivate values;
     the executing thread fills SENDER_DECL, the others read through
     RECEIVER_DECL.  */
  ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");

  ptr_type = build_pointer_type (ctx->record_type);
  ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");

  /* L0: executing-thread path; L1: receiving-thread path; L2: join.  */
  l0 = create_artificial_label ();
  l1 = create_artificial_label ();
  l2 = create_artificial_label ();

  /* copyout_p = GOMP_single_copy_start (); NULL means "I execute".  */
  t = built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START];
  t = build_function_call_expr (t, NULL);
  t = fold_convert (ptr_type, t);
  t = build2 (MODIFY_EXPR, void_type_node, ctx->receiver_decl, t);
  gimplify_and_add (t, pre_p);

  t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
	      build_int_cst (ptr_type, 0));
  t = build3 (COND_EXPR, void_type_node, t,
	      build_and_jump (&l0), build_and_jump (&l1));
  gimplify_and_add (t, pre_p);

  /* L0: run BODY, copy values out, publish via GOMP_single_copy_end.  */
  t = build1 (LABEL_EXPR, void_type_node, l0);
  gimplify_and_add (t, pre_p);

  append_to_statement_list (OMP_SINGLE_BODY (single_stmt), pre_p);

  /* Emits the copy-out stores here and collects the matching copy-in
     loads into COPYIN_SEQ for the other-threads path.  */
  copyin_seq = NULL;
  lower_copyprivate_clauses (OMP_SINGLE_CLAUSES (single_stmt), pre_p,
			      &copyin_seq, ctx);

  t = build_fold_addr_expr (ctx->sender_decl);
  args = tree_cons (NULL, t, NULL);
  t = built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END];
  t = build_function_call_expr (t, args);
  gimplify_and_add (t, pre_p);

  t = build_and_jump (&l2);
  gimplify_and_add (t, pre_p);

  /* L1: non-executing threads copy the published values in.  */
  t = build1 (LABEL_EXPR, void_type_node, l1);
  gimplify_and_add (t, pre_p);

  append_to_statement_list (copyin_seq, pre_p);

  t = build1 (LABEL_EXPR, void_type_node, l2);
  gimplify_and_add (t, pre_p);
}


/* Expand code for an OpenMP single directive.
*/

static void
lower_omp_single (tree *stmt_p, omp_context *ctx)
{
  tree t, bind, block, single_stmt = *stmt_p, dlist;

  push_gimplify_context ();

  /* Replace *STMT_P with a BIND_EXPR that will hold the whole lowered
     sequence; the original OMP_SINGLE stays inside as a marker.  */
  block = make_node (BLOCK);
  *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  TREE_SIDE_EFFECTS (bind) = 1;

  lower_rec_input_clauses (OMP_SINGLE_CLAUSES (single_stmt),
			   &BIND_EXPR_BODY (bind), &dlist, ctx);
  lower_omp (&OMP_SINGLE_BODY (single_stmt), ctx);

  append_to_statement_list (single_stmt, &BIND_EXPR_BODY (bind));

  /* A non-NULL record type means a copyprivate clause was present.  */
  if (ctx->record_type)
    lower_omp_single_copy (single_stmt, &BIND_EXPR_BODY (bind), ctx);
  else
    lower_omp_single_simple (single_stmt, &BIND_EXPR_BODY (bind));

  OMP_SINGLE_BODY (single_stmt) = NULL;

  append_to_statement_list (dlist, &BIND_EXPR_BODY (bind));

  maybe_catch_exception (&BIND_EXPR_BODY (bind));

  /* Region exit marker; nowait is propagated from the clauses.  */
  t = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SINGLE_CLAUSES (single_stmt),
					     OMP_CLAUSE_NOWAIT);
  append_to_statement_list (t, &BIND_EXPR_BODY (bind));

  pop_gimplify_context (bind);

  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}


/* Expand code for an OpenMP master directive.
*/

static void
lower_omp_master (tree *stmt_p, omp_context *ctx)
{
  tree bind, block, stmt = *stmt_p, lab = NULL, x;

  push_gimplify_context ();

  block = make_node (BLOCK);
  *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  TREE_SIDE_EFFECTS (bind) = 1;

  append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));

  /* if (omp_get_thread_num () != 0) goto lab;  — only the master
     thread (id 0) falls through into the body.  */
  x = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
  x = build_function_call_expr (x, NULL);
  x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
  x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
  gimplify_and_add (x, &BIND_EXPR_BODY (bind));

  lower_omp (&OMP_MASTER_BODY (stmt), ctx);
  maybe_catch_exception (&OMP_MASTER_BODY (stmt));
  append_to_statement_list (OMP_MASTER_BODY (stmt), &BIND_EXPR_BODY (bind));
  OMP_MASTER_BODY (stmt) = NULL;

  x = build1 (LABEL_EXPR, void_type_node, lab);
  gimplify_and_add (x, &BIND_EXPR_BODY (bind));

  /* master has no implicit barrier, hence nowait on the exit marker.  */
  x = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (x) = 1;
  append_to_statement_list (x, &BIND_EXPR_BODY (bind));

  pop_gimplify_context (bind);

  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}


/* Expand code for an OpenMP ordered directive.
*/

static void
lower_omp_ordered (tree *stmt_p, omp_context *ctx)
{
  tree bind, block, stmt = *stmt_p, x;

  push_gimplify_context ();

  block = make_node (BLOCK);
  *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  TREE_SIDE_EFFECTS (bind) = 1;

  append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));

  /* Bracket the lowered body with GOMP_ordered_start/end runtime
     calls.  */
  x = built_in_decls[BUILT_IN_GOMP_ORDERED_START];
  x = build_function_call_expr (x, NULL);
  gimplify_and_add (x, &BIND_EXPR_BODY (bind));

  lower_omp (&OMP_ORDERED_BODY (stmt), ctx);
  maybe_catch_exception (&OMP_ORDERED_BODY (stmt));
  append_to_statement_list (OMP_ORDERED_BODY (stmt), &BIND_EXPR_BODY (bind));
  OMP_ORDERED_BODY (stmt) = NULL;

  x = built_in_decls[BUILT_IN_GOMP_ORDERED_END];
  x = build_function_call_expr (x, NULL);
  gimplify_and_add (x, &BIND_EXPR_BODY (bind));

  /* ordered has no implicit barrier, hence nowait on the exit marker.  */
  x = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (x) = 1;
  append_to_statement_list (x, &BIND_EXPR_BODY (bind));

  pop_gimplify_context (bind);

  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}


/* Gimplify an OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But in the NAMED case,
   requires that languages coordinate a symbol name.  It is therefore
   best put here in common code.
*/

/* Map from critical section name (an IDENTIFIER_NODE) to the common
   mutex variable created for it, shared across the translation unit
   and preserved over garbage collection.  */
static GTY((param1_is (tree), param2_is (tree)))
  splay_tree critical_name_mutexes;

static void
lower_omp_critical (tree *stmt_p, omp_context *ctx)
{
  tree bind, block, stmt = *stmt_p;
  tree t, lock, unlock, name;

  name = OMP_CRITICAL_NAME (stmt);
  if (name)
    {
      tree decl, args;
      splay_tree_node n;

      if (!critical_name_mutexes)
	critical_name_mutexes
	  = splay_tree_new_ggc (splay_tree_compare_pointers);

      /* Look up (or create on first use) the mutex for this name.  */
      n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
      if (n == NULL)
	{
	  char *new_str;

	  decl = create_tmp_var_raw (ptr_type_node, NULL);

	  /* The mutex is a public common symbol named
	     .gomp_critical_user_<name> so that all translation units
	     using the same critical name share one lock.  */
	  new_str = ACONCAT ((".gomp_critical_user_",
			      IDENTIFIER_POINTER (name), NULL));
	  DECL_NAME (decl) = get_identifier (new_str);
	  TREE_PUBLIC (decl) = 1;
	  TREE_STATIC (decl) = 1;
	  DECL_COMMON (decl) = 1;
	  DECL_ARTIFICIAL (decl) = 1;
	  DECL_IGNORED_P (decl) = 1;
	  cgraph_varpool_finalize_decl (decl);

	  splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
			     (splay_tree_value) decl);
	}
      else
	decl = (tree) n->value;

      args = tree_cons (NULL, build_fold_addr_expr (decl), NULL);
      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
      lock = build_function_call_expr (lock, args);

      args = tree_cons (NULL, build_fold_addr_expr (decl), NULL);
      unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
      unlock = build_function_call_expr (unlock, args);
    }
  else
    {
      /* Unnamed critical sections all share the single default lock
	 inside libgomp.  */
      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
      lock = build_function_call_expr (lock, NULL);

      unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
      unlock = build_function_call_expr (unlock, NULL);
    }

  push_gimplify_context ();

  block = make_node (BLOCK);
  *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  TREE_SIDE_EFFECTS (bind) = 1;

  append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));

  /* lock; BODY; unlock — in that order.  */
  gimplify_and_add (lock, &BIND_EXPR_BODY (bind));

  lower_omp (&OMP_CRITICAL_BODY (stmt), ctx);
  maybe_catch_exception (&OMP_CRITICAL_BODY (stmt));
  append_to_statement_list (OMP_CRITICAL_BODY (stmt), &BIND_EXPR_BODY (bind));
  OMP_CRITICAL_BODY (stmt) = NULL;

  gimplify_and_add (unlock, &BIND_EXPR_BODY (bind));

  /* critical has no implicit barrier, hence nowait on the exit marker.  */
  t = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (t) = 1;
  append_to_statement_list (t, &BIND_EXPR_BODY (bind));

  pop_gimplify_context (bind);
  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}


/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST, iterator initialization is appended to
   *BODY_P.  */

static void
lower_omp_for_lastprivate (struct omp_for_data *fd, tree *body_p,
			   tree *dlist, struct omp_context *ctx)
{
  tree clauses, cond, stmts, vinit, t;
  enum tree_code cond_code;
  
  /* Invert the loop condition: the lastprivate copy-out runs only in
     the thread whose iterator ran past the end.  */
  cond_code = fd->cond_code;
  cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;

  /* When possible, use a strict equality expression.  This can let VRP
     type optimizations deduce the value and remove a copy.  */
  if (host_integerp (fd->step, 0))
    {
      HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->step);
      if (step == 1 || step == -1)
	cond_code = EQ_EXPR;
    }

  cond = build2 (cond_code, boolean_type_node, fd->v, fd->n2);

  clauses = OMP_FOR_CLAUSES (fd->for_stmt);
  stmts = NULL;
  lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
  if (stmts != NULL)
    {
      append_to_statement_list (stmts, dlist);

      /* Optimize: v = 0; is usually cheaper than v = some_other_constant.  */
      vinit = fd->n1;
      if (cond_code == EQ_EXPR
	  && host_integerp (fd->n2, 0)
	  && ! integer_zerop (fd->n2))
	vinit = build_int_cst (TREE_TYPE (fd->v), 0);

      /* Initialize the iterator variable, so that threads that don't execute
	 any iterations don't execute the lastprivate clauses by accident.  */
      t = build2 (MODIFY_EXPR, void_type_node, fd->v, vinit);
      gimplify_and_add (t, body_p);
    }
}


/* Lower code for an OpenMP loop directive.
*/

/* Lower an OMP_FOR directive in *STMT_P using context CTX.  The result
   is a new BIND_EXPR containing: the pre-body, the input-clause setup,
   the (still structured) OMP_FOR header, the loop body, an OMP_CONTINUE
   marker, the reduction/lastprivate copy-out code, and a final
   OMP_RETURN region-exit marker.  */

static void
lower_omp_for (tree *stmt_p, omp_context *ctx)
{
  tree t, stmt, ilist, dlist, new_stmt, *body_p, *rhs_p;
  struct omp_for_data fd;

  stmt = *stmt_p;

  push_gimplify_context ();

  lower_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
  lower_omp (&OMP_FOR_BODY (stmt), ctx);

  /* Move declaration of temporaries in the loop body before we make
     it go away.  */
  if (TREE_CODE (OMP_FOR_BODY (stmt)) == BIND_EXPR)
    record_vars_into (BIND_EXPR_VARS (OMP_FOR_BODY (stmt)), ctx->cb.dst_fn);

  new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (new_stmt) = 1;
  body_p = &BIND_EXPR_BODY (new_stmt);

  /* The pre-body and input clauses go before the lowered OMP_FOR.  */
  ilist = NULL;
  dlist = NULL;
  append_to_statement_list (OMP_FOR_PRE_BODY (stmt), body_p);
  lower_rec_input_clauses (OMP_FOR_CLAUSES (stmt), body_p, &dlist, ctx);

  /* Lower the header expressions.  At this point, we can assume that
     the header is of the form:

	#pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)

     We just need to make sure that VAL1, VAL2 and VAL3 are lowered
     using the .omp_data_s mapping, if needed.  */
  rhs_p = &TREE_OPERAND (OMP_FOR_INIT (stmt), 1);
  if (!is_gimple_min_invariant (*rhs_p))
    *rhs_p = get_formal_tmp_var (*rhs_p, body_p);

  rhs_p = &TREE_OPERAND (OMP_FOR_COND (stmt), 1);
  if (!is_gimple_min_invariant (*rhs_p))
    *rhs_p = get_formal_tmp_var (*rhs_p, body_p);

  /* The increment is the RHS of "V = V [+-] VAL3"; dig out VAL3.  */
  rhs_p = &TREE_OPERAND (TREE_OPERAND (OMP_FOR_INCR (stmt), 1), 1);
  if (!is_gimple_min_invariant (*rhs_p))
    *rhs_p = get_formal_tmp_var (*rhs_p, body_p);

  /* Once lowered, extract the bounds and clauses.  */
  extract_omp_for_data (stmt, &fd);

  lower_omp_for_lastprivate (&fd, body_p, &dlist, ctx);

  append_to_statement_list (stmt, body_p);

  append_to_statement_list (OMP_FOR_BODY (stmt), body_p);

  t = make_node (OMP_CONTINUE);
  append_to_statement_list (t, body_p);

  /* After the loop, add exit clauses.  */
  lower_reduction_clauses (OMP_FOR_CLAUSES (stmt), body_p, ctx);
  append_to_statement_list (dlist, body_p);

  maybe_catch_exception (body_p);

  /* Region exit marker goes at the end of the loop body.  */
  t = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (t) = fd.have_nowait;
  append_to_statement_list (t, body_p);

  pop_gimplify_context (NULL_TREE);
  record_vars_into (ctx->block_vars, ctx->cb.dst_fn);

  OMP_FOR_BODY (stmt) = NULL_TREE;
  OMP_FOR_PRE_BODY (stmt) = NULL_TREE;
  *stmt_p = new_stmt;
}

/* Callback for walk_stmts.  Check if *TP only contains OMP_FOR
   or OMP_PARALLEL.  *INFO (via WI) is set to 1 when exactly one
   worksharing construct is seen, and to -1 on anything else.  */

static tree
check_combined_parallel (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  int *info = wi->info;

  *walk_subtrees = 0;
  switch (TREE_CODE (*tp))
    {
    case OMP_FOR:
    case OMP_SECTIONS:
      /* First worksharing construct -> 1; a second one -> -1.  */
      *info = *info == 0 ? 1 : -1;
      break;
    default:
      *info = -1;
      break;
    }
  return NULL;
}

/* Lower the OpenMP parallel directive in *STMT_P.  CTX holds context
   information for the directive.  Builds the sender-side data record
   (.omp_data_o), sequences the clause setup / body / copy-out inside
   OMP_PARALLEL_BODY, and detects combined parallel-worksharing
   regions.  */

static void
lower_omp_parallel (tree *stmt_p, omp_context *ctx)
{
  tree clauses, par_bind, par_body, new_body, bind;
  tree olist, ilist, par_olist, par_ilist;
  tree stmt, child_fn, t;

  stmt = *stmt_p;

  clauses = OMP_PARALLEL_CLAUSES (stmt);
  par_bind = OMP_PARALLEL_BODY (stmt);
  par_body = BIND_EXPR_BODY (par_bind);
  child_fn = ctx->cb.dst_fn;

  if (!OMP_PARALLEL_COMBINED (stmt))
    {
      struct walk_stmt_info wi;
      int ws_num = 0;

      /* If the body consists of exactly one worksharing construct,
	 mark this as a combined parallel so expansion can use the
	 fused GOMP entry points.  */
      memset (&wi, 0, sizeof (wi));
      wi.callback = check_combined_parallel;
      wi.info = &ws_num;
      wi.val_only = true;
      walk_stmts (&wi, &par_bind);
      if (ws_num == 1)
	OMP_PARALLEL_COMBINED (stmt) = 1;
    }

  push_gimplify_context ();

  par_olist = NULL_TREE;
  par_ilist = NULL_TREE;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (&par_body, ctx);
  lower_reduction_clauses (clauses, &par_olist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (BIND_EXPR_VARS (par_bind), child_fn);

  if (ctx->record_type)
    {
      /* .omp_data_o is the record passed to the child function.  */
      ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_data_o");
      OMP_PARALLEL_DATA_ARG (stmt) = ctx->sender_decl;
    }

  olist = NULL_TREE;
  ilist = NULL_TREE;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  /* Once all the expansions are done, sequence all the different
     fragments inside OMP_PARALLEL_BODY.  */
  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  append_to_statement_list (ilist, &BIND_EXPR_BODY (bind));

  new_body = alloc_stmt_list ();

  if (ctx->record_type)
    {
      t = build_fold_addr_expr (ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
      t = build2 (MODIFY_EXPR, void_type_node, ctx->receiver_decl, t);
      append_to_statement_list (t, &new_body);
    }

  append_to_statement_list (par_ilist, &new_body);
  append_to_statement_list (par_body, &new_body);
  append_to_statement_list (par_olist, &new_body);
  maybe_catch_exception (&new_body);
  t = make_node (OMP_RETURN);
  append_to_statement_list (t, &new_body);
  OMP_PARALLEL_BODY (stmt) = new_body;

  append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
  append_to_statement_list (olist, &BIND_EXPR_BODY (bind));

  *stmt_p = bind;

  pop_gimplify_context (NULL_TREE);
}

/* Pass *TP back through the gimplifier within the context determined by WI.
   This handles replacement of DECL_VALUE_EXPR, as well as adjusting the
   flags on ADDR_EXPR.
*/

/* Re-gimplify *TP in the value/lvalue context recorded in WI, linking
   any statements the gimplifier produces before the current statement
   iterator.  Gimplification must fully succeed here (GS_ALL_DONE).  */

static void
lower_regimplify (tree *tp, struct walk_stmt_info *wi)
{
  enum gimplify_status gs;
  tree pre = NULL;

  if (wi->is_lhs)
    gs = gimplify_expr (tp, &pre, NULL, is_gimple_lvalue, fb_lvalue);
  else if (wi->val_only)
    gs = gimplify_expr (tp, &pre, NULL, is_gimple_val, fb_rvalue);
  else
    gs = gimplify_expr (tp, &pre, NULL, is_gimple_formal_tmp_var, fb_rvalue);
  gcc_assert (gs == GS_ALL_DONE);

  if (pre)
    tsi_link_before (&wi->tsi, pre, TSI_SAME_STMT);
}

/* Copy EXP into a temporary.  Insert the initialization statement before
   TSI.  Returns the new temporary.  */

static tree
init_tmp_var (tree exp, tree_stmt_iterator *tsi)
{
  tree t, stmt;

  t = create_tmp_var (TREE_TYPE (exp), NULL);
  if (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE)
    DECL_COMPLEX_GIMPLE_REG_P (t) = 1;
  stmt = build2 (MODIFY_EXPR, TREE_TYPE (t), t, exp);
  SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi)));
  tsi_link_before (tsi, stmt, TSI_SAME_STMT);

  return t;
}

/* Similarly, but copy from the temporary and insert the statement
   after the iterator.  Used for left-hand sides: the caller stores into
   the returned temporary, and this statement copies it back to EXP.  */

static tree
save_tmp_var (tree exp, tree_stmt_iterator *tsi)
{
  tree t, stmt;

  t = create_tmp_var (TREE_TYPE (exp), NULL);
  if (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE)
    DECL_COMPLEX_GIMPLE_REG_P (t) = 1;
  stmt = build2 (MODIFY_EXPR, TREE_TYPE (t), exp, t);
  SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi)));
  tsi_link_after (tsi, stmt, TSI_SAME_STMT);

  return t;
}

/* Callback for walk_stmts.  Lower the OpenMP directive pointed by TP,
   dispatching to the per-directive lowering routines above.  Also
   re-gimplifies references (VAR_DECLs with DECL_VALUE_EXPR, ADDR_EXPRs,
   component/array references) that remapping may have invalidated.  */

static tree
lower_omp_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  omp_context *ctx = wi->info;
  tree t = *tp;

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (errorcount && OMP_DIRECTIVE_P (*tp))
    {
      *tp = build_empty_stmt ();
      return NULL_TREE;
    }

  *walk_subtrees = 0;
  switch (TREE_CODE (*tp))
    {
    case OMP_PARALLEL:
      ctx = maybe_lookup_ctx (t);
      /* NOTE(review): unlike the cases below, no gcc_assert (ctx) here
	 — confirm whether a parallel directive can legitimately lack a
	 context.  */
      lower_omp_parallel (tp, ctx);
      break;

    case OMP_FOR:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_for (tp, ctx);
      break;

    case OMP_SECTIONS:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_sections (tp, ctx);
      break;

    case OMP_SINGLE:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_single (tp, ctx);
      break;

    case OMP_MASTER:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_master (tp, ctx);
      break;

    case OMP_ORDERED:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_ordered (tp, ctx);
      break;

    case OMP_CRITICAL:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_critical (tp, ctx);
      break;

    case VAR_DECL:
      /* Variables with DECL_VALUE_EXPR (remapped privatized vars) must
	 be re-gimplified, and the result materialized into a temporary
	 when a bare value is required.  */
      if (ctx && DECL_HAS_VALUE_EXPR_P (t))
	{
	  lower_regimplify (&t, wi);
	  if (wi->val_only)
	    {
	      if (wi->is_lhs)
		t = save_tmp_var (t, &wi->tsi);
	      else
		t = init_tmp_var (t, &wi->tsi);
	    }
	  *tp = t;
	}
      break;

    case ADDR_EXPR:
      if (ctx)
	lower_regimplify (tp, wi);
      break;

    case ARRAY_REF:
    case ARRAY_RANGE_REF:
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case COMPONENT_REF:
    case VIEW_CONVERT_EXPR:
      if (ctx)
	lower_regimplify (tp, wi);
      break;

    case INDIRECT_REF:
      if (ctx)
	{
	  /* Only the pointer operand needs regimplifying, as an rvalue.  */
	  wi->is_lhs = false;
	  wi->val_only = true;
	  lower_regimplify (&TREE_OPERAND (t, 0), wi);
	}
      break;

    default:
      if (!TYPE_P (t) && !DECL_P (t))
	*walk_subtrees = 1;
      break;
    }

  return NULL_TREE;
}

/* Walk the statements in *STMT_P, lowering every OpenMP directive found
   using context CTX (NULL at the outermost level).  */

static void
lower_omp (tree *stmt_p, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.callback = lower_omp_1;
  wi.info = ctx;
  wi.val_only = true;
  wi.want_locations = true;

  walk_stmts (&wi, stmt_p);
}

/* Main entry point.
*/

/* Main entry point for the omplower pass: scan the function body for
   OpenMP directives (building the context tree), then lower them if any
   were found.  */

static unsigned int
execute_lower_omp (void)
{
  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
				 delete_omp_context);

  scan_omp (&DECL_SAVED_TREE (current_function_decl), NULL);
  gcc_assert (parallel_nesting_level == 0);

  /* Only bother lowering when the scan found at least one context.  */
  if (all_contexts->root)
    lower_omp (&DECL_SAVED_TREE (current_function_decl), NULL);

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  return 0;
}

/* Gate: run the pass only when -fopenmp is in effect.  */

static bool
gate_lower_omp (void)
{
  return flag_openmp != 0;
}

struct tree_opt_pass pass_lower_omp =
{
  "omplower",				/* name */
  gate_lower_omp,			/* gate */
  execute_lower_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  0,					/* tv_id */
  PROP_gimple_any,			/* properties_required */
  PROP_gimple_lomp,			/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func,			/* todo_flags_finish */
  0					/* letter */
};

/* The following is a utility to diagnose OpenMP structured block
   violations.  It is not part of the "omplower" pass, as that's
   invoked too late.  It should be invoked by the respective front
   ends after gimplification.  */

/* Maps each LABEL_DECL to the minimal-context chain (a tree_cons list
   of enclosing OpenMP directives) in which it appears.  */
static splay_tree all_labels;

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  BRANCH_CTX/LABEL_CTX are the context
   chains of the branch site and the label; on error the offending
   statement *STMT_P is replaced with an empty statement.  */

static bool
diagnose_sb_0 (tree *stmt_p, tree branch_ctx, tree label_ctx)
{
  bool exit_p = true;

  if ((label_ctx ? TREE_VALUE (label_ctx) : NULL) == branch_ctx)
    return false;

  /* Try to avoid confusing the user by producing an error message with
     correct "exit" or "enter" verbiage.  We prefer "exit" unless we can
     show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
	{
	  if (TREE_VALUE (label_ctx) == branch_ctx)
	    {
	      exit_p = false;
	      break;
	    }
	  label_ctx = TREE_CHAIN (label_ctx);
	}
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");

  *stmt_p = build_empty_stmt ();
  return true;
}

/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where in the tree each label is found.  */

static tree
diagnose_sb_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  tree context = (tree) wi->info;
  tree inner_context;
  tree t = *tp;

  *walk_subtrees = 0;
  switch (TREE_CODE (t))
    {
    case OMP_PARALLEL:
    case OMP_SECTIONS:
    case OMP_SINGLE:
      walk_tree (&OMP_CLAUSES (t), diagnose_sb_1, wi, NULL);
      /* FALLTHRU */
    case OMP_SECTION:
    case OMP_MASTER:
    case OMP_ORDERED:
    case OMP_CRITICAL:
      /* The minimal context here is just a tree of statements.  */
      inner_context = tree_cons (NULL, t, context);
      wi->info = inner_context;
      walk_stmts (wi, &OMP_BODY (t));
      wi->info = context;
      break;

    case OMP_FOR:
      walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_1, wi, NULL);
      inner_context = tree_cons (NULL, t, context);
      wi->info = inner_context;
      /* The loop header expressions belong to the inner context too.  */
      walk_tree (&OMP_FOR_INIT (t), diagnose_sb_1, wi, NULL);
      walk_tree (&OMP_FOR_COND (t), diagnose_sb_1, wi, NULL);
      walk_tree (&OMP_FOR_INCR (t), diagnose_sb_1, wi, NULL);
      walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
      walk_stmts (wi, &OMP_FOR_BODY (t));
      wi->info = context;
      break;

    case LABEL_EXPR:
      /* Remember the context chain in which this label occurs.  */
      splay_tree_insert (all_labels, (splay_tree_key) LABEL_EXPR_LABEL (t),
			 (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.
*/

/* Walk callback for pass 2: for every GOTO_EXPR, SWITCH_EXPR case label
   and RETURN_EXPR, compare the branch's OpenMP context with the context
   recorded for the destination label in ALL_LABELS, and diagnose
   mismatches via diagnose_sb_0.  */

static tree
diagnose_sb_2 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  tree context = (tree) wi->info;
  splay_tree_node n;
  tree t = *tp;

  *walk_subtrees = 0;
  switch (TREE_CODE (t))
    {
    case OMP_PARALLEL:
    case OMP_SECTIONS:
    case OMP_SINGLE:
      walk_tree (&OMP_CLAUSES (t), diagnose_sb_2, wi, NULL);
      /* FALLTHRU */
    case OMP_SECTION:
    case OMP_MASTER:
    case OMP_ORDERED:
    case OMP_CRITICAL:
      wi->info = t;
      walk_stmts (wi, &OMP_BODY (t));
      wi->info = context;
      break;

    case OMP_FOR:
      walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_2, wi, NULL);
      wi->info = t;
      walk_tree (&OMP_FOR_INIT (t), diagnose_sb_2, wi, NULL);
      walk_tree (&OMP_FOR_COND (t), diagnose_sb_2, wi, NULL);
      walk_tree (&OMP_FOR_INCR (t), diagnose_sb_2, wi, NULL);
      walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
      walk_stmts (wi, &OMP_FOR_BODY (t));
      wi->info = context;
      break;

    case GOTO_EXPR:
      {
	tree lab = GOTO_DESTINATION (t);
	/* Computed gotos carry a non-LABEL_DECL destination; skip.  */
	if (TREE_CODE (lab) != LABEL_DECL)
	  break;

	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	diagnose_sb_0 (tp, context, n ? (tree) n->value : NULL_TREE);
      }
      break;

    case SWITCH_EXPR:
      {
	tree vec = SWITCH_LABELS (t);
	int i, len = TREE_VEC_LENGTH (vec);
	for (i = 0; i < len; ++i)
	  {
	    tree lab = CASE_LABEL (TREE_VEC_ELT (vec, i));
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	    /* NOTE(review): unlike the GOTO_EXPR case above, N is
	       dereferenced without a NULL check here — confirm that every
	       case label is guaranteed to have been recorded by pass 1.  */
	    if (diagnose_sb_0 (tp, context, (tree) n->value))
	      break;
	  }
      }
      break;

    case RETURN_EXPR:
      /* A return always leaves every enclosing OpenMP region.  */
      diagnose_sb_0 (tp, context, NULL_TREE);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

/* Diagnose invalid entries to / exits from OpenMP structured blocks in
   FNDECL: pass 1 records each label's context chain, pass 2 checks every
   branch against it.  Temporarily switches current_function_decl.  */

void
diagnose_omp_structured_block_errors (tree fndecl)
{
  tree save_current = current_function_decl;
  struct walk_stmt_info wi;

  current_function_decl = fndecl;

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  wi.callback = diagnose_sb_1;
  walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));

  memset (&wi, 0, sizeof (wi));
  wi.callback = diagnose_sb_2;
  wi.want_locations = true;
  wi.want_return_expr = true;
  walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));

  splay_tree_delete (all_labels);
  all_labels = NULL;

  current_function_decl = save_current;
}

#include "gt-omp-low.h"
GB_binop__plus_uint32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__plus_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_08__plus_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_02__plus_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_04__plus_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__plus_uint32)
// A*D function (colscale):         GB (_AxD__plus_uint32)
// D*A function (rowscale):         GB (_DxB__plus_uint32)
// C+=B function (dense accum):     GB (_Cdense_accumB__plus_uint32)
// C+=b function (dense accum):     GB (_Cdense_accumb__plus_uint32)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__plus_uint32)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__plus_uint32)
// C=scalar+B                       GB (_bind1st__plus_uint32)
// C=scalar+B'                      GB (_bind1st_tran__plus_uint32)
// C=A+scalar                       GB (_bind2nd__plus_uint32)
// C=A'+scalar                      GB (_bind2nd_tran__plus_uint32)

// C type:   uint32_t
// A type:   uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij + bij)

// The macros below configure the generic template files included by each
// function body; the templates expand using these type/operator settings.

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint32_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint32_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x + y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PLUS || GxB_NO_UINT32 || GxB_NO_PLUS_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__plus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__plus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__plus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__plus_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns first
    // (generated-code artifact; harmless).
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__plus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__plus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__plus_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__plus_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__plus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__plus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__plus_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__plus_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t   x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap (GBB is 1 if Bb is NULL)
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x + bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__plus_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t   y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap (GBB is 1 if Ab is NULL)
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij + y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    uint32_t aij = GBX (Ax, pA, false) ;  \
    Cx [pC] = (x + aij) ;               \
}

GrB_Info GB (_bind1st_tran__plus_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    // Temporarily redefine it so the template reads A correctly,
    // then restore it below.
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    uint32_t aij = GBX (Ax, pA, false) ;  \
    Cx [pC] = (aij + y) ;               \
}

GrB_Info GB (_bind2nd_tran__plus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__minv_uint64_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_uint64_uint16
// op(A') function:  GB_tran__minv_uint64_uint16

// C type:   uint64_t
// A type:   uint16_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 64)

// The macros below configure the generic transpose/apply templates.

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x)   \
    z = GB_IMINV_UNSIGNED (x, 64) ;

// casting
#define GB_CASTING(z, x)   \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_uint64_uint16
(
    uint64_t *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_uint64_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
nestedfn-5.c
/* { dg-do run } */ extern void abort (void); void foo (int *j) { int i = 5; int bar (void) { return i + 1; } #pragma omp sections { #pragma omp section { if (bar () != 6) #pragma omp atomic ++*j; } #pragma omp section { if (bar () != 6) #pragma omp atomic ++*j; } } } int main (void) { int j = 0; #pragma omp parallel num_threads (2) foo (&j); if (j) abort (); return 0; }
dvjsvd.c
#include "dvjsvd.h"
#include "dnormx.h"
#include "dscale.h"
#include "dnorm2.h"
#include "dznrm2.h"
#include "ddpscl.h"
//#include "dgsscl.h"
#include "dbjac2.h"
#include "djrotf.h"
#include "djrot.h"
#include "dswp.h"
#include "vecdef.h"
#include "defops.h"

#ifdef JTRACE
#include "timer.h"
#endif /* JTRACE */

#ifdef DBL_MAX_ROT_EXP
#error DBL_MAX_ROT_EXP already defined
#else /* !DBL_MAX_ROT_EXP */
/* largest exponent such that a Jacobi rotation cannot overflow a column
   (one below DBL_MAX's exponent) */
#define DBL_MAX_ROT_EXP 1022
#endif /* ?DBL_MAX_ROT_EXP */

/* Vectorized (AVX-512) one-sided Jacobi SVD on an m-by-n matrix G, with the
   accumulated right singular vectors in V and the singular values returned in
   split exponent/fraction form (eS, fS).  NOTE(review): eS/fS look like an
   (exponent, mantissa) representation maintained by dnorm2_/dscale_ — confirm
   against those routines.
   js  : precomputed Jacobi orderings, one row of *n indices per step;
   stp : number of steps per sweep; swp: maximum number of sweeps;
   work: 8*(n/2) doubles of scratch; iwork: scratch for per-lane masks.
   Returns the number of sweeps performed (>= 0) or a negative error code
   identifying the offending argument/stage.  */
fint dvjsvd_(const fnat m[static restrict 1], const fnat n[static restrict 1], double G[static restrict VDL], const fnat ldG[static restrict 1], double V[static restrict VDL], const fnat ldV[static restrict 1], double eS[static restrict 1], double fS[static restrict 1], const unsigned js[static restrict 1], const unsigned stp[static restrict 1], const unsigned swp[static restrict 1], double work[static restrict VDL], unsigned iwork[static restrict 1])
{
  const fnat n_2 = (*n >> 1u);
  /* argument validation: dimensions must be even / SIMD-lane aligned,
     pointers must be VDL-aligned */
  if (IS_NOT_VFPENV)
    return -14;
  if (!*n)
    return 0;
  if (*m < *n)
    return -1;
  if (*m & VDL_1)
    return -1;
  if (*n & 1u)
    return -2;
  if (n_2 & VDL_1)
    return -2;
  if (IS_NOT_ALIGNED(G))
    return -3;
  if (*ldG < *m)
    return -4;
  if (*ldG & VDL_1)
    return -4;
  if (IS_NOT_ALIGNED(V))
    return -5;
  if (*ldV < *n)
    return -6;
  if (*ldV & VDL_1)
    return -6;
  if (IS_NOT_ALIGNED(work))
    return -12;
#ifdef JTRACE
  /* tracing mode reuses work[] as the trace file name on entry */
  FILE *const jtr = fopen((const char*)work, "w");
  if (!jtr)
    return -13;
  (void)fprintf(jtr, "M=");
  (void)fflush(jtr);
#endif /* JTRACE */
  /* M tracks the max-norm of G throughout; it drives the rescaling logic */
  double M = dnormx_(m, n, G, ldG);
  if (!(M <= DBL_MAX))
    return -15;
  if (copysign(1.0, M) == -1.0)
    return -16;
#ifdef JTRACE
  (void)fprintf(jtr, "%#.17e\n", M);
  (void)fflush(jtr);
#endif /* JTRACE */
  /* V = I; singular value accumulators start as (exp = -inf, frac = 1) */
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,V,ldV,eS,fS)
#endif /* _OPENMP */
  for (fnat j = 0u; j < *n; ++j) {
    register const VD z = _mm512_setzero_pd();
    double *const Vj = V + j * (size_t)(*ldV);
    for (fnat i = 0u; i < *n; i += VDL)
      _mm512_store_pd((Vj + i), z);
    fS[j] = Vj[j] = 1.0;
    eS[j] = -HUGE_VAL;
  }
  if (M == 0.0)
    return 0;
  /* DBL_MAX_NRM_EXP: exponent bound so that a column norm of 2*m entries
     cannot overflow */
  const double M_m = (DBL_MAX / (*m << 1u));
  double es = 0.0, fs = 0.0;
  dbl2ef(M_m, &es, &fs);
  const int DBL_MAX_NRM_EXP = (int)es;
  dbl2ef(M, &es, &fs);
  int eM = (int)es;
  int sR = DBL_MAX_ROT_EXP - eM;
  int sN = DBL_MAX_NRM_EXP - eM - 1;
#ifdef JTRACE
  (void)fprintf(jtr, "eM=%d, sR=%d, sN=%d, M=", eM, sR, sN);
  (void)fflush(jtr);
#endif /* JTRACE */
  /* initial upscale/downscale of G by 2^sN so norms are safely computable;
     NOTE(review): es is reused as raw storage for an fint via type punning */
  if (sN) {
    *(fint*)&es = sN;
    if (dscale_(m, n, G, ldG, (const fint*)&es) < 0)
      return -17;
    M = scalbn(M, sN);
  }
  int sT = sN; /* running total of applied scalings, undone on exit */
#ifdef JTRACE
  (void)fprintf(jtr, "%#.17e\n", M);
  (void)fflush(jtr);
#endif /* JTRACE */
  /* carve the scratch buffer into n/2-sized panels */
  const fnat n_16 = (n_2 >> VDLlg);
  double *const a11 = work;
  double *const a22 = a11 + n_2;
  double *const a21 = a22 + n_2;
  double *const c = a21 + n_2;
  double *const at = c + n_2;
  double *const l1 = at + n_2;
  double *const l2 = l1 + n_2;
  double *const w = l2 + n_2;
  unsigned *const p = iwork;
  unsigned *const pc = p + n_16;
#ifndef DGSSCL_H
  /* l1[j] == 1.0 marks columns whose norms must be (re)computed */
  if (*swp) {
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(l1,n)
#endif /* _OPENMP */
    for (fnat i = 0u; i < *n; ++i)
      l1[i] = 1.0;
  }
#endif /* !DGSSCL_H */
  // see LAPACK's DGESVJ
  const double tol = sqrt((double)(*m)) * scalbn(DBL_EPSILON, -1);
  const double gst = scalb(tol, DBL_MAX_FIN_EXP);
  unsigned sw = 0u;
#ifdef JTRACE
  unsigned rd[2u] = { 0u, 0u };
  const uint64_t hz = tsc_get_freq_hz_(rd);
  long double Tn = 0.0L, Tp = 0.0L, Ta = 0.0L, Te = 0.0L, Tr = 0.0L;
  uint64_t T = UINT64_C(0);
#endif /* JTRACE */
  while (sw < *swp) {
    size_t swt = 0u; /* rotations applied in this sweep; 0 => converged */
    for (unsigned st = 0u; st < *stp; ++st) {
      // rescale according to M if necessary and update M
      dbl2ef(M, &es, &fs);
      eM = (int)es;
      sR = DBL_MAX_ROT_EXP - eM;
      sN = DBL_MAX_NRM_EXP - eM - 1;
      if (sR < 0) {
#ifdef JTRACE
        (void)fprintf(jtr, "sweep=%u, step=%u, eM=%d, sR=%d, sN=%d, M=", sw, st, eM, sR, sN);
        (void)fflush(jtr);
#endif /* JTRACE */
        *(fint*)&es = sN;
        if (dscale_(m, n, G, ldG, (const fint*)&es) < 0)
          return -18;
        M = scalbn(M, sN);
        sT += sN;
#ifndef DGSSCL_H
        /* after rescaling every column norm is stale */
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(l1,n)
#endif /* _OPENMP */
        for (fnat i = 0u; i < *n; ++i)
          l1[i] = 1.0;
#endif /* !DGSSCL_H */
#ifdef JTRACE
        (void)fprintf(jtr, "%#.17e\n", M);
        (void)fflush(jtr);
#endif /* JTRACE */
      }
      // compute the norms, overflow-aware
      const unsigned *const r = js + st * (size_t)(*n); /* pairing for this step */
      double nM = -0.0;
      bool overflow = false;
      do {
#ifdef JTRACE
        T = rdtsc_beg(rd);
#endif /* JTRACE */
        nM = 0.0;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,r,m,G,ldG,eS,fS,a11,a22,c,at,l1) reduction(max:nM)
#endif /* _OPENMP */
        for (fnat pq = 0u; pq < *n; pq += 2u) {
          const fnat _pq = (pq >> 1u);
          /* nM > DBL_MAX means an earlier pair already overflowed; poison
             the rest so the retry loop rescales and recomputes */
          if (!(nM <= DBL_MAX)) {
            a11[_pq] = NAN;
            a22[_pq] = NAN;
            continue;
          }
          const fnat pq_ = pq + 1u;
          const size_t _p = r[pq];
          const size_t _q = r[pq_];
#ifndef DGSSCL_H
          if (l1[_p] == 1.0) {
#endif /* !DGSSCL_H */
            double *const Gp = G + _p * (*ldG);
            nM = fmax(nM, fmin((a11[_pq] = dnorm2_(m, Gp, (eS + _p), (fS + _p), (c + _pq), (at + _pq))), HUGE_VAL));
            if (!(nM <= DBL_MAX)) {
              a22[_pq] = NAN;
              continue;
            }
#ifndef DGSSCL_H
          }
          if (l1[_q] == 1.0) {
#endif /* !DGSSCL_H */
            double *const Gq = G + _q * (*ldG);
            nM = fmax(nM, fmin((a22[_pq] = dnorm2_(m, Gq, (eS + _q), (fS + _q), (c + _pq), (at + _pq))), HUGE_VAL));
#ifndef DGSSCL_H
          }
#endif /* !DGSSCL_H */
        }
#ifdef JTRACE
        Tn += tsc_lap(hz, T, rdtsc_end(rd));
#endif /* JTRACE */
        /* NOTE(review): assignment inside the condition is intentional */
        if (overflow = !(nM <= DBL_MAX)) {
#ifdef JTRACE
          (void)fprintf(jtr, "sweep=%u, step=%u, M=", sw, st);
          (void)fflush(jtr);
#endif /* JTRACE */
          *(fint*)&es = sN;
          if (dscale_(m, n, G, ldG, (const fint*)&es) < 0)
            return -19;
          M = scalbn(M, sN);
          sT += sN;
#ifndef DGSSCL_H
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(l1,n)
#endif /* _OPENMP */
          for (fnat i = 0u; i < *n; ++i)
            l1[i] = 1.0;
#endif /* !DGSSCL_H */
#ifdef JTRACE
          (void)fprintf(jtr, "%#.17e\n", M);
          (void)fflush(jtr);
#endif /* JTRACE */
        }
      } while (overflow);
      // scaled dot-products
#ifdef JTRACE
      T = rdtsc_beg(rd);
#endif /* JTRACE */
      nM = 0.0; /* min-reduced below: becomes negative on a bad dot-product */
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,r,m,G,ldG,eS,fS,w) reduction(min:nM)
#endif /* _OPENMP */
      for (fnat pq = 0u; pq < *n; pq += 2u) {
        const fnat _pq = (pq >> 1u);
        if (!(nM >= 0.0)) {
          w[_pq] = NAN;
          continue;
        }
        const fnat pq_ = pq + 1u;
        const size_t _p = r[pq];
        const size_t _q = r[pq_];
        // pack the norms
        const double e2[2u] = { eS[_q], eS[_p] };
        const double f2[2u] = { fS[_q], fS[_p] };
        double *const Gp = G + _p * (*ldG);
        double *const Gq = G + _q * (*ldG);
        w[_pq] = ddpscl_(m, Gq, Gp, e2, f2);
        if (!(isfinite(w[_pq])))
          nM = fmin(nM, -20.0);
      }
#ifdef JTRACE
      Tp += tsc_lap(hz, T, rdtsc_end(rd));
#endif /* JTRACE */
      if (!(nM >= 0.0)) {
#ifdef JTRACE
        (void)fprintf(jtr, "sweep=%u, step=%u\n", sw, st);
        (void)fflush(jtr);
#endif /* JTRACE */
        return (fint)nM;
      }
      // repack data
#ifdef JTRACE
      T = rdtsc_beg(rd);
#endif /* JTRACE */
      /* gather the per-pair (exponent, fraction) norms into the contiguous
         c/at/l1/l2 panels so the 2x2 eigensolver can read them SIMD-wise */
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,r,eS,fS,c,at,l1,l2)
#endif /* _OPENMP */
      for (fnat pq = 0u; pq < *n; pq += 2u) {
        const fnat pq_ = pq + 1u;
        const fnat _pq = (pq >> 1u);
        const size_t _p = r[pq];
        const size_t _q = r[pq_];
        c[_pq] = eS[_p];
        at[_pq] = eS[_q];
        l1[_pq] = fS[_p];
        l2[_pq] = fS[_q];
      }
      fnat stt = 0u; /* pairs failing the convergence test in this step */
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n_2,a11,a22,a21,c,at,l1,l2,w,p,pc,tol,gst) reduction(+:stt)
#endif /* _OPENMP */
      for (fnat i = 0u; i < n_2; i += VDL) {
        const fnat j = (i >> VDLlg);
        // convergence check
        register VD _a21 = _mm512_load_pd(w + i);
        register const VD _zero = _mm512_set1_pd(-0.0);
        register const VD zero = _mm512_setzero_pd();
        register const VD _tol = _mm512_set1_pd(tol);
        register const VD _a21_ = VDABS(_a21);
        pc[j] = MD2U(_mm512_cmple_pd_mask(_tol, _a21_));
        /* NOTE(review): assignment inside the condition is intentional */
        if (p[j] = _mm_popcnt_u32(pc[j])) {
          stt += p[j];
#ifdef DGSSCL_H
          register VD _a11 = _mm512_load_pd(a11 + i);
          register VD _a22 = _mm512_load_pd(a22 + i);
          register const VD _gst = _mm512_set1_pd(gst);
          // might not yet be sorted, so check both cases
          pc[j] |= (MD2U(_mm512_cmplt_pd_mask(_mm512_mul_pd(_gst, _a22), _a11)) << VDL);
          pc[j] |= (MD2U(_mm512_cmplt_pd_mask(_mm512_mul_pd(_gst, _a11), _a22)) << VDL2);
#endif /* DGSSCL_H */
          // Grammian pre-scaling into the double precision range
          register const VD f1 = _mm512_load_pd(l1 + i);
          register const VD f2 = _mm512_load_pd(l2 + i);
          register const VD e1 = _mm512_load_pd(c + i);
          register const VD e2 = _mm512_load_pd(at + i);
          register VD f12 = _mm512_div_pd(f1, f2);
          register VD e12 = _mm512_sub_pd(e1, e2);
          register VD f21 = _mm512_div_pd(f2, f1);
          register VD e21 = _mm512_sub_pd(e2, e1);
          e12 = _mm512_add_pd(e12, _mm512_getexp_pd(f12));
          f12 = VDMANT(f12);
          e21 = _mm512_add_pd(e21, _mm512_getexp_pd(f21));
          f21 = VDMANT(f21);
          register const MD c12 = VDEFLE(e12,e21,f12,f21);
          register const VD mxe = _mm512_set1_pd(DBL_MAX_FIN_EXP);
          register const VD E = _mm512_mask_blend_pd(c12, e12, e21);
          register const VD d = _mm512_min_pd(_mm512_sub_pd(mxe, E), zero);
          e12 = _mm512_add_pd(e12, d);
          e21 = _mm512_add_pd(e21, d);
#ifdef DGSSCL_H
          _a11 = _mm512_scalef_pd(f12, e12);
          _a22 = _mm512_scalef_pd(f21, e21);
#else /* !DGSSCL_H */
          register const VD _a11 = _mm512_scalef_pd(f12, e12);
          register const VD _a22 = _mm512_scalef_pd(f21, e21);
#endif /* ?DGSSCL_H */
          _a21 = _mm512_scalef_pd(_a21, d);
          _mm512_store_pd((a11 + i), _a11);
          _mm512_store_pd((a22 + i), _a22);
          _mm512_store_pd((a21 + i), _a21);
        }
      }
      swt += stt;
#ifdef JTRACE
      Ta += tsc_lap(hz, T, rdtsc_end(rd));
      T = rdtsc_beg(rd);
#endif /* JTRACE */
      /* batched 2x2 Jacobi eigendecomposition of all flagged pairs */
      const fint _n_2 =
#ifdef USE_SECANTS
        -(fint)n_2
#else /* !USE_SECANTS */
        (fint)n_2
#endif /* ?USE_SECANTS */
        ;
      if (dbjac2i(&_n_2, a11, a22, a21, c, at, l1, l2, p) < 0)
        return -21;
#ifdef JTRACE
      Te += tsc_lap(hz, T, rdtsc_end(rd));
      T = rdtsc_beg(rd);
#endif /* JTRACE */
      fnat np = 0u; // number of swaps
      /* decode the per-lane mask bits of pc/p into a per-pair action code in
         a21[]: +/-3 Gram-Schmidt rescale, +/-2 rotation (negative => with
         swap), +/-1 no rotation (negative => swap only); the pair's column
         indices are stashed bit-wise into a11/a22 for the apply loop */
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(a11,a22,a21,eS,fS,p,pc,r,n_2) reduction(+:np)
#endif /* _OPENMP */
      for (fnat i = 0u; i < n_2; i += VDL) {
        const fnat j = (i >> VDLlg);
        unsigned gsp = ((pc[j] & 0xFF0000u) >> VDL2);
        unsigned gsn = ((pc[j] & 0xFF00u) >> VDL);
        unsigned trans = (pc[j] & 0xFFu);
        unsigned perm = (p[j] & 0xFFu);
        for (fnat k = 0u; k < VDL; ++k) {
          const fnat l = (i + k);
          const fnat pq = (l << 1u);
          const uint64_t _p = r[pq];
          const uint64_t _q = r[pq + 1u];
          *(uint64_t*)(a11 + l) = _p;
          *(uint64_t*)(a22 + l) = _q;
          if (gsp & 1u) {
            a21[l] = -3.0;
            ++np;
          }
          else if (gsn & 1u)
            a21[l] = 3.0;
          else if (trans & 1u) {
            if (perm & 1u) {
              a21[l] = -2.0;
              ++np;
            }
            else // no swap
              a21[l] = 2.0;
          }
          else if (efcmp((eS + _p), (fS + _p), (eS + _q), (fS + _q)) < 0) {
            /* converged pair but out of order: swap the stored norms here
               (a21[l] doubles as the swap temporary) */
            a21[l] = eS[_p];
            eS[_p] = eS[_q];
            eS[_q] = a21[l];
            a21[l] = fS[_p];
            fS[_p] = fS[_q];
            fS[_q] = a21[l];
            a21[l] = -1.0;
            ++np;
          }
          else // no swap
            a21[l] = 1.0;
          gsp >>= 1u;
          gsn >>= 1u;
          trans >>= 1u;
          perm >>= 1u;
        }
      }
      /* apply the decoded transformations to the columns of G (and V) */
      nM = 0.0;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(m,n,G,ldG,V,ldV,a11,a22,a21,c,at,l1,w,eS,fS,n_2) reduction(max:nM)
#endif /* _OPENMP */
      for (fnat i = 0u; i < n_2; ++i) {
        const size_t _p = *(const uint64_t*)(a11 + i);
        const size_t _q = *(const uint64_t*)(a22 + i);
#ifndef DGSSCL_H
        l1[_q] = l1[_p] = 0.0;
#endif /* !DGSSCL_H */
        if (!(nM <= DBL_MAX)) {
          w[i] = NAN;
          continue;
        }
        double _at, _c;
        fint _m, _n;
#ifdef DGSSCL_H
        if (a21[i] == -3.0) {
          _m = -(fint)*m;
          _n = -(fint)*n;
          _at = w[i];
          double e[2u] = { eS[_p], eS[_q] };
          double f[2u] = { fS[_p], fS[_q] };
          w[i] = dgsscl_(&_m, &_at, (G + _p * (*ldG)), (G + _q * (*ldG)), e, f);
          if (!(w[i] >= 0.0) || !(w[i] <= DBL_MAX)) {
            nM = w[i] = HUGE_VAL;
            continue;
          }
          else // no overflow
            nM = fmax(nM, w[i]);
          // TODO: should V be transformed and how (very small \tan)?
          continue;
        }
        else
#endif /* DGSSCL_H */
        if (a21[i] == -2.0) {
          /* rotate with implicit swap: negative sizes signal it downstream */
          _m = -(fint)*m;
          _n = -(fint)*n;
          _c = c[i];
          _at = at[i];
        }
        else if (a21[i] == -1.0) {
          /* swap-only: exchange the columns of G and V explicitly */
          double *const Gp = G + _p * (*ldG);
          double *const Gq = G + _q * (*ldG);
          if (_m = dswp_(m, Gp, Gq)) {
            w[i] = (double)_m;
            nM = HUGE_VAL;
            continue;
          }
          double *const Vp = V + _p * (*ldV);
          double *const Vq = V + _q * (*ldV);
          if (_n = dswp_(n, Vp, Vq)) {
            w[i] = (double)_n;
            nM = HUGE_VAL;
            continue;
          }
          nM = fmax(nM, (w[i] = 0.0));
          continue;
        }
        else if (a21[i] == 1.0) {
          nM = fmax(nM, (w[i] = 0.0));
          continue;
        }
        else if (a21[i] == 2.0) {
          _m = (fint)*m;
          _n = (fint)*n;
          _c = c[i];
          _at = at[i];
        }
#ifdef DGSSCL_H
        else if (a21[i] == 3.0) {
          _m = (fint)*m;
          _n = (fint)*n;
          _at = w[i];
          double e[2u] = { eS[_p], eS[_q] };
          double f[2u] = { fS[_p], fS[_q] };
          w[i] = dgsscl_(&_m, &_at, (G + _p * (*ldG)), (G + _q * (*ldG)), e, f);
          if (!(w[i] >= 0.0) || !(w[i] <= DBL_MAX)) {
            nM = w[i] = HUGE_VAL;
            continue;
          }
          else // no overflow
            nM = fmax(nM, w[i]);
          // TODO: should V be transformed and how (very small \tan)?
          continue;
        }
#endif /* DGSSCL_H */
        else { // should never happen
          w[i] = NAN;
          nM = HUGE_VAL;
          continue;
        }
        w[i] = djrot_(&_m, (G + _p * (*ldG)), (G + _q * (*ldG)), &_c, &_at);
        if (!(w[i] >= 0.0) || !(w[i] <= DBL_MAX)) {
          nM = w[i] = HUGE_VAL;
          continue;
        }
        else // no overflow
          nM = fmax(nM, w[i]);
        if (_m = djrotf_(&_n, (V + _p * (*ldV)), (V + _q * (*ldV)), &_c, &_at)) {
          w[i] = (double)_m;
          nM = HUGE_VAL;
          continue;
        }
#ifndef DGSSCL_H
        /* rotated columns get stale norms: mark them for recomputation */
        l1[_q] = l1[_p] = 1.0;
#endif /* !DGSSCL_H */
      }
      M = fmax(M, nM);
#ifdef JTRACE
      Tr += tsc_lap(hz, T, rdtsc_end(rd));
#endif /* JTRACE */
      if (!(M <= DBL_MAX)) {
#ifdef JTRACE
        (void)fprintf(jtr, "sweep=%u, step=%u\n", sw, st);
        (void)fflush(jtr);
#endif /* JTRACE */
        return -22;
      }
    }
    if (!swt)
      break; /* converged: a full sweep with no rotations */
    ++sw;
  }
  /* converged before exhausting the sweep budget: normalize the columns of G
     (the left singular vectors) and undo the accumulated scaling sT on eS */
  if (sw < *swp) {
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(m,n,G,ldG,eS,fS,sT)
#endif /* _OPENMP */
    for (fnat j = 0u; j < *n; ++j) {
      double *const Gj = G + j * (size_t)(*ldG);
      register const VD _f = _mm512_set1_pd(fS[j]);
      register const VD _s = _mm512_set1_pd(-(eS[j]));
      for (fnat i = 0u; i < *m; i += VDL) {
        double *const Gij = Gj + i;
        _mm512_store_pd(Gij, _mm512_scalef_pd(_mm512_div_pd(_mm512_load_pd(Gij), _f), _s));
      }
      eS[j] -= sT;
    }
  }
#ifdef JTRACE
  (void)fprintf(jtr, "sT=%d, M=%#.17e\n", sT, M);
  (void)fprintf(jtr, "Tn=%15.9Lf, Tp=%15.9Lf, Ta=%15.9Lf, Te=%15.9Lf, Tr=%15.9Lf\n", Tn, Tp, Ta, Te, Tr);
  (void)fclose(jtr);
#endif /* JTRACE */
  return (fint)sw;
}
rowwise_pick.h
/*!
 * Copyright (c) 2020 by Contributors
 * \file array/cpu/rowwise_pick.h
 * \brief Template implementation for rowwise pick operators.
 */
#ifndef DGL_ARRAY_CPU_ROWWISE_PICK_H_
#define DGL_ARRAY_CPU_ROWWISE_PICK_H_

#include <dgl/array.h>
#include <functional>

namespace dgl {
namespace aten {
namespace impl {

// User-defined function for picking elements from one row.
//
// The column indices of the given row are stored in
//   [col + off, col + off + len)
//
// Similarly, the data indices are stored in
//   [data + off, data + off + len)
// Data index pointer could be NULL, which means data[i] == i
//
// *ATTENTION*: This function will be invoked concurrently. Please make sure
// it is thread-safe.
//
// \param rowid The row to pick from.
// \param off Starting offset of this row.
// \param len NNZ of the row.
// \param col Pointer of the column indices.
// \param data Pointer of the data indices.
// \param out_idx Picked indices in [off, off + len).
template <typename IdxType>
using PickFn = std::function<void(
    IdxType rowid, IdxType off, IdxType len,
    const IdxType* col, const IdxType* data,
    IdxType* out_idx)>;

// Template for picking non-zero values row-wise. The implementation utilizes
// OpenMP parallelization on rows because each row performs computation independently.
//
// \param mat       CSR matrix to pick from.
// \param rows      Row ids to pick from (each must be < mat.num_rows).
// \param num_picks Number of picks per row.
// \param replace   Whether picking is with replacement.
// \param pick_fn   Thread-safe per-row pick callback (see PickFn).
// \return COO matrix holding the picked (row, col, data) triplets.
template <typename IdxType>
COOMatrix CSRRowWisePick(CSRMatrix mat, IdArray rows,
                         int64_t num_picks, bool replace, PickFn<IdxType> pick_fn) {
  using namespace aten;
  const IdxType* indptr = static_cast<IdxType*>(mat.indptr->data);
  const IdxType* indices = static_cast<IdxType*>(mat.indices->data);
  // data may be absent, in which case the data index of entry i is i itself
  const IdxType* data = CSRHasData(mat)? static_cast<IdxType*>(mat.data->data) : nullptr;
  const IdxType* rows_data = static_cast<IdxType*>(rows->data);
  const int64_t num_rows = rows->shape[0];
  const auto& ctx = mat.indptr->ctx;

  // To leverage OMP parallelization, we create two arrays to store
  // picked src and dst indices. Each array is of length num_rows * num_picks.
  // For rows whose nnz < num_picks, the indices are padded with -1.
  //
  // We check whether all the given rows
  // have at least num_picks number of nnz when replace is false.
  //
  // If the check holds, remove -1 elements by remove_if operation, which simply
  // moves valid elements to the head of arrays and create a view of the original
  // array. The implementation consumes a little extra memory than the actual requirement.
  //
  // Otherwise, directly use the row and col arrays to construct the result COO matrix.
  IdArray picked_row = Full(-1, num_rows * num_picks, sizeof(IdxType) * 8, ctx);
  IdArray picked_col = Full(-1, num_rows * num_picks, sizeof(IdxType) * 8, ctx);
  IdArray picked_idx = Full(-1, num_rows * num_picks, sizeof(IdxType) * 8, ctx);
  IdxType* picked_rdata = static_cast<IdxType*>(picked_row->data);
  IdxType* picked_cdata = static_cast<IdxType*>(picked_col->data);
  IdxType* picked_idata = static_cast<IdxType*>(picked_idx->data);

  // all_has_fanout == true iff no -1 padding will remain, so the O(n)
  // remove_if compaction below can be skipped entirely.
  bool all_has_fanout = true;
  if (replace) {
    all_has_fanout = true;
  } else {
#pragma omp parallel for reduction(&&:all_has_fanout)
    for (int64_t i = 0; i < num_rows; ++i) {
      const IdxType rid = rows_data[i];
      const IdxType len = indptr[rid + 1] - indptr[rid];
      all_has_fanout = all_has_fanout && (len >= num_picks);
    }
  }

#pragma omp parallel for
  for (int64_t i = 0; i < num_rows; ++i) {
    const IdxType rid = rows_data[i];
    CHECK_LT(rid, mat.num_rows);
    const IdxType off = indptr[rid];
    const IdxType len = indptr[rid + 1] - off;
    if (len <= num_picks && !replace) {
      // nnz <= num_picks and w/o replacement, take all nnz
      for (int64_t j = 0; j < len; ++j) {
        picked_rdata[i * num_picks + j] = rid;
        picked_cdata[i * num_picks + j] = indices[off + j];
        picked_idata[i * num_picks + j] = data? data[off + j] : off + j;
      }
    } else {
      // pick_fn writes the chosen positions into picked_idata first; they are
      // then translated in place to column / data indices (each slot is read
      // before it is overwritten).
      pick_fn(rid, off, len, indices, data,
              picked_idata + i * num_picks);
      for (int64_t j = 0; j < num_picks; ++j) {
        const IdxType picked = picked_idata[i * num_picks + j];
        picked_rdata[i * num_picks + j] = rid;
        picked_cdata[i * num_picks + j] = indices[picked];
        picked_idata[i * num_picks + j] = data? data[picked] : picked;
      }
    }
  }

  if (!all_has_fanout) {
    // correct the array by remove_if
    IdxType* new_row_end = std::remove_if(picked_rdata, picked_rdata + num_rows * num_picks,
                                          [] (IdxType i) { return i == -1; });
    IdxType* new_col_end = std::remove_if(picked_cdata, picked_cdata + num_rows * num_picks,
                                          [] (IdxType i) { return i == -1; });
    IdxType* new_idx_end = std::remove_if(picked_idata, picked_idata + num_rows * num_picks,
                                          [] (IdxType i) { return i == -1; });
    const int64_t new_len = (new_row_end - picked_rdata);
    CHECK_EQ(new_col_end - picked_cdata, new_len);
    CHECK_EQ(new_idx_end - picked_idata, new_len);
    picked_row = picked_row.CreateView({new_len}, picked_row->dtype);
    picked_col = picked_col.CreateView({new_len}, picked_col->dtype);
    picked_idx = picked_idx.CreateView({new_len}, picked_idx->dtype);
  }

  return COOMatrix(mat.num_rows, mat.num_cols,
                   picked_row, picked_col, picked_idx);
}

// Template for picking non-zero values row-wise. The implementation first slices
// out the corresponding rows and then converts it to CSR format. It then performs
// row-wise pick on the CSR matrix and rectifies the returned results.
template <typename IdxType> COOMatrix COORowWisePick(COOMatrix mat, IdArray rows, int64_t num_picks, bool replace, PickFn<IdxType> pick_fn) { using namespace aten; const auto& csr = COOToCSR(COOSliceRows(mat, rows)); const IdArray new_rows = Range(0, rows->shape[0], rows->dtype.bits, rows->ctx); const auto& picked = CSRRowWisePick<IdxType>(csr, new_rows, num_picks, replace, pick_fn); return COOMatrix(mat.num_rows, mat.num_cols, IndexSelect(rows, picked.row), // map the row index to the correct one picked.col, picked.data); } } // namespace impl } // namespace aten } // namespace dgl #endif // DGL_ARRAY_CPU_ROWWISE_PICK_H_
omp-simd-clone.c
/* OMP constructs' SIMD clone supporting code.

Copyright (C) 2005-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "langhooks.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "tree-dfa.h"
#include "cfgloop.h"
#include "symbol-summary.h"
#include "ipa-param-manipulation.h"
#include "tree-eh.h"
#include "varasm.h"
#include "stringpool.h"
#include "attribs.h"
#include "omp-simd-clone.h"

/* Return the number of elements in vector type VECTYPE, which is associated
   with a SIMD clone.  At present these always have a constant length.  */

static unsigned HOST_WIDE_INT
simd_clone_subparts (tree vectype)
{
  return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
}

/* Allocate a fresh `simd_clone' and return it.  NARGS is the number of
   arguments to reserve space for.  The structure is GC-allocated and
   zero-initialized, with the per-argument array tacked on at the end.  */

static struct cgraph_simd_clone *
simd_clone_struct_alloc (int nargs)
{
  struct cgraph_simd_clone *clone_info;
  size_t len = (sizeof (struct cgraph_simd_clone)
		+ nargs * sizeof (struct cgraph_simd_clone_arg));
  clone_info = (struct cgraph_simd_clone *)
	       ggc_internal_cleared_alloc (len);
  return clone_info;
}

/* Make a copy of the `struct cgraph_simd_clone' in FROM to TO.  */

static inline void
simd_clone_struct_copy (struct cgraph_simd_clone *to,
			struct cgraph_simd_clone *from)
{
  /* NOTE(review): copies nargs - inbranch argument slots; presumably the
     mask argument of an in-branch clone is excluded — confirm against the
     cgraph_simd_clone definition.  */
  memcpy (to, from, (sizeof (struct cgraph_simd_clone)
		     + ((from->nargs - from->inbranch)
			* sizeof (struct cgraph_simd_clone_arg))));
}

/* Fill an empty vector ARGS with parameter types of function FNDECL.  This
   uses TYPE_ARG_TYPES if available, otherwise falls back to types of
   DECL_ARGUMENTS types.  */

static void
simd_clone_vector_of_formal_parm_types (vec<tree> *args, tree fndecl)
{
  if (TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
    {
      push_function_arg_types (args, TREE_TYPE (fndecl));
      return;
    }
  push_function_arg_decls (args, fndecl);
  unsigned int i;
  tree arg;
  FOR_EACH_VEC_ELT (*args, i, arg)
    (*args)[i] = TREE_TYPE ((*args)[i]);
}

/* Given a simd function in NODE, extract the simd specific
   information from the OMP clauses passed in CLAUSES, and return
   the struct cgraph_simd_clone * if it should be cloned.
   *INBRANCH_SPECIFIED is set to TRUE if the `inbranch' or
   `notinbranch' clause specified, otherwise set to FALSE.
   Returns NULL (after a warning) when the clauses or the function
   signature make cloning impossible.  */

static struct cgraph_simd_clone *
simd_clone_clauses_extract (struct cgraph_node *node, tree clauses,
			    bool *inbranch_specified)
{
  auto_vec<tree> args;
  simd_clone_vector_of_formal_parm_types (&args, node->decl);
  tree t;
  int n;
  *inbranch_specified = false;

  n = args.length ();
  /* Drop the trailing void_type_node that terminates a non-variadic
     TYPE_ARG_TYPES list.  */
  if (n > 0 && args.last () == void_type_node)
    n--;

  /* Allocate one more than needed just in case this is an in-branch
     clone which will require a mask argument.  */
  struct cgraph_simd_clone *clone_info = simd_clone_struct_alloc (n + 1);
  clone_info->nargs = n;

  if (!clauses)
    goto out;
  clauses = TREE_VALUE (clauses);
  if (!clauses || TREE_CODE (clauses) != OMP_CLAUSE)
    goto out;

  for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
    {
      switch (OMP_CLAUSE_CODE (t))
	{
	case OMP_CLAUSE_INBRANCH:
	  clone_info->inbranch = 1;
	  *inbranch_specified = true;
	  break;
	case OMP_CLAUSE_NOTINBRANCH:
	  clone_info->inbranch = 0;
	  *inbranch_specified = true;
	  break;
	case OMP_CLAUSE_SIMDLEN:
	  clone_info->simdlen
	    = TREE_INT_CST_LOW (OMP_CLAUSE_SIMDLEN_EXPR (t));
	  break;
	case OMP_CLAUSE_LINEAR:
	  {
	    /* For linear clauses, OMP_CLAUSE_DECL holds the argument
	       number rather than the decl itself.  */
	    tree decl = OMP_CLAUSE_DECL (t);
	    tree step = OMP_CLAUSE_LINEAR_STEP (t);
	    int argno = TREE_INT_CST_LOW (decl);
	    if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (t))
	      {
		/* Variable stride: linear_step stores the index of the
		   uniform argument that carries the step at run time.  */
		enum cgraph_simd_clone_arg_type arg_type;
		if (TREE_CODE (args[argno]) == REFERENCE_TYPE)
		  switch (OMP_CLAUSE_LINEAR_KIND (t))
		    {
		    case OMP_CLAUSE_LINEAR_REF:
		      arg_type
			= SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP;
		      break;
		    case OMP_CLAUSE_LINEAR_UVAL:
		      arg_type
			= SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP;
		      break;
		    case OMP_CLAUSE_LINEAR_VAL:
		    case OMP_CLAUSE_LINEAR_DEFAULT:
		      arg_type
			= SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP;
		      break;
		    default:
		      gcc_unreachable ();
		    }
		else
		  arg_type = SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP;
		clone_info->args[argno].arg_type = arg_type;
		clone_info->args[argno].linear_step = tree_to_shwi (step);
		gcc_assert (clone_info->args[argno].linear_step >= 0
			    && clone_info->args[argno].linear_step < n);
	      }
	    else
	      {
		if (POINTER_TYPE_P (args[argno]))
		  step = fold_convert (ssizetype, step);
		if (!tree_fits_shwi_p (step))
		  {
		    warning_at (OMP_CLAUSE_LOCATION (t), 0,
				"ignoring large linear step");
		    return NULL;
		  }
		else if (integer_zerop (step))
		  {
		    warning_at (OMP_CLAUSE_LOCATION (t), 0,
				"ignoring zero linear step");
		    return NULL;
		  }
		else
		  {
		    enum cgraph_simd_clone_arg_type arg_type;
		    if (TREE_CODE (args[argno]) == REFERENCE_TYPE)
		      switch (OMP_CLAUSE_LINEAR_KIND (t))
			{
			case OMP_CLAUSE_LINEAR_REF:
			  arg_type
			    = SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP;
			  break;
			case OMP_CLAUSE_LINEAR_UVAL:
			  arg_type
			    = SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP;
			  break;
			case OMP_CLAUSE_LINEAR_VAL:
			case OMP_CLAUSE_LINEAR_DEFAULT:
			  arg_type
			    = SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP;
			  break;
			default:
			  gcc_unreachable ();
			}
		    else
		      arg_type = SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP;
		    clone_info->args[argno].arg_type = arg_type;
		    clone_info->args[argno].linear_step = tree_to_shwi (step);
		  }
	      }
	    break;
	  }
	case OMP_CLAUSE_UNIFORM:
	  {
	    tree decl = OMP_CLAUSE_DECL (t);
	    int argno = tree_to_uhwi (decl);
	    clone_info->args[argno].arg_type
	      = SIMD_CLONE_ARG_TYPE_UNIFORM;
	    break;
	  }
	case OMP_CLAUSE_ALIGNED:
	  {
	    /* Ignore aligned (x) for declare simd, for the ABI we really
	       need an alignment specified.  */
	    if (OMP_CLAUSE_ALIGNED_ALIGNMENT (t) == NULL_TREE)
	      break;
	    tree decl = OMP_CLAUSE_DECL (t);
	    int argno = tree_to_uhwi (decl);
	    clone_info->args[argno].alignment
	      = TREE_INT_CST_LOW (OMP_CLAUSE_ALIGNED_ALIGNMENT (t));
	    break;
	  }
	default:
	  break;
	}
    }

 out:
  /* _Atomic qualified return or (non-uniform) argument types cannot be
     vectorized; warn and decline to clone.  */
  if (TYPE_ATOMIC (TREE_TYPE (TREE_TYPE (node->decl))))
    {
      warning_at (DECL_SOURCE_LOCATION (node->decl), 0,
		  "ignoring %<#pragma omp declare simd%> on function "
		  "with %<_Atomic%> qualified return type");
      return NULL;
    }

  for (unsigned int argno = 0; argno < clone_info->nargs; argno++)
    if (TYPE_ATOMIC (args[argno])
	&& clone_info->args[argno].arg_type != SIMD_CLONE_ARG_TYPE_UNIFORM)
      {
	warning_at (DECL_SOURCE_LOCATION (node->decl), 0,
		    "ignoring %<#pragma omp declare simd%> on function "
		    "with %<_Atomic%> qualified non-%<uniform%> argument");
	args.release ();
	return NULL;
      }

  return clone_info;
}

/* Given a SIMD clone in NODE, calculate the characteristic data
   type and return the coresponding type.  The characteristic data
   type is computed as described in the Intel Vector ABI.  */

static tree
simd_clone_compute_base_data_type (struct cgraph_node *node,
				   struct cgraph_simd_clone *clone_info)
{
  tree type = integer_type_node;
  tree fndecl = node->decl;

  /* a) For non-void function, the characteristic data type is the
	return type.  */
  if (TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE)
    type = TREE_TYPE (TREE_TYPE (fndecl));

  /* b) If the function has any non-uniform, non-linear parameters,
	then the characteristic data type is the type of the first
	such parameter.  */
  else
    {
      auto_vec<tree> map;
      simd_clone_vector_of_formal_parm_types (&map, fndecl);
      for (unsigned int i = 0; i < clone_info->nargs; ++i)
	if (clone_info->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
	  {
	    type = map[i];
	    break;
	  }
    }

  /* c) If the characteristic data type determined by a) or b) above
	is struct, union, or class type which is pass-by-value (except
	for the type that maps to the built-in complex data type), the
	characteristic data type is int.  */
  if (RECORD_OR_UNION_TYPE_P (type)
      && !aggregate_value_p (type, NULL)
      && TREE_CODE (type) != COMPLEX_TYPE)
    return integer_type_node;

  /* d) If none of the above three classes is applicable, the
	characteristic data type is int.  */

  return type;

  /* e) For Intel Xeon Phi native and offload compilation, if the
	resulting characteristic data type is 8-bit or 16-bit integer
	data type, the characteristic data type is int.  */
  /* Well, we don't handle Xeon Phi yet.  */
}

/* Build the Vector-ABI mangled name ("_ZGV<isa><mask><simdlen><parms>_<name>")
   for the SIMD clone described by CLONE_INFO of NODE, and return it as an
   identifier, or NULL_TREE if a clone with the same mangled name already
   exists on NODE.  */

static tree
simd_clone_mangle (struct cgraph_node *node,
		   struct cgraph_simd_clone *clone_info)
{
  char vecsize_mangle = clone_info->vecsize_mangle;
  char mask = clone_info->inbranch ? 'M' : 'N';
  unsigned int simdlen = clone_info->simdlen;
  unsigned int n;
  pretty_printer pp;

  gcc_assert (vecsize_mangle && simdlen);

  pp_string (&pp, "_ZGV");
  pp_character (&pp, vecsize_mangle);
  pp_character (&pp, mask);
  pp_decimal_int (&pp, simdlen);

  for (n = 0; n < clone_info->nargs; ++n)
    {
      struct cgraph_simd_clone_arg arg = clone_info->args[n];

      switch (arg.arg_type)
	{
	case SIMD_CLONE_ARG_TYPE_UNIFORM:
	  pp_character (&pp, 'u');
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	  pp_character (&pp, 'l');
	  goto mangle_linear;
	case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
	  pp_character (&pp, 'R');
	  goto mangle_linear;
	case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	  pp_character (&pp, 'L');
	  goto mangle_linear;
	case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
	  pp_character (&pp, 'U');
	  goto mangle_linear;
	mangle_linear:
	  gcc_assert (arg.linear_step != 0);
	  /* A step of 1 is the default and is omitted from the name.  */
	  if (arg.linear_step > 1)
	    pp_unsigned_wide_integer (&pp, arg.linear_step);
	  else if (arg.linear_step < 0)
	    {
	      pp_character (&pp, 'n');
	      pp_unsigned_wide_integer (&pp, (-(unsigned HOST_WIDE_INT)
					      arg.linear_step));
	    }
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	  pp_string (&pp, "ls");
	  pp_unsigned_wide_integer (&pp, arg.linear_step);
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	  pp_string (&pp, "Rs");
	  pp_unsigned_wide_integer (&pp, arg.linear_step);
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	  pp_string (&pp, "Ls");
	  pp_unsigned_wide_integer (&pp, arg.linear_step);
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
	  pp_string (&pp, "Us");
	  pp_unsigned_wide_integer (&pp, arg.linear_step);
	  break;
	default:
	  pp_character (&pp, 'v');
	}
      if (arg.alignment)
	{
	  pp_character (&pp, 'a');
	  pp_decimal_int (&pp, arg.alignment);
	}
    }

  pp_underscore (&pp);
  const char *str = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node->decl));
  if (*str == '*')
    ++str;
  pp_string (&pp, str);
  str = pp_formatted_text (&pp);

  /* If there already is a SIMD clone with the same mangled name, don't
     add another one.  This can happen e.g. for
     #pragma omp declare simd
     #pragma omp declare simd simdlen(8)
     int foo (int, int);
     if the simdlen is assumed to be 8 for the first one, etc.  */
  for (struct cgraph_node *clone = node->simd_clones; clone;
       clone = clone->simdclone->next_clone)
    if (id_equal (DECL_ASSEMBLER_NAME (clone->decl), str))
      return NULL_TREE;

  return get_identifier (str);
}

/* Create a simd clone of OLD_NODE and return it.  Returns NULL when the
   node has a definition whose gimple body is unavailable.  */

static struct cgraph_node *
simd_clone_create (struct cgraph_node *old_node)
{
  struct cgraph_node *new_node;
  if (old_node->definition)
    {
      if (!old_node->has_gimple_body_p ())
	return NULL;
      old_node->get_body ();
      new_node
	= old_node->create_version_clone_with_body (vNULL, NULL, NULL, NULL,
						    NULL, "simdclone");
    }
  else
    {
      /* Declaration only: clone the decl by hand and register a
	 body-less version clone.  */
      tree old_decl = old_node->decl;
      tree new_decl = copy_node (old_node->decl);
      DECL_NAME (new_decl)
	= clone_function_name_numbered (old_decl, "simdclone");
      SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
      SET_DECL_RTL (new_decl, NULL);
      DECL_STATIC_CONSTRUCTOR (new_decl) = 0;
      DECL_STATIC_DESTRUCTOR (new_decl) = 0;
      new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
      if (old_node->in_other_partition)
	new_node->in_other_partition = 1;
    }
  if (new_node == NULL)
    return new_node;

  /* The clone is a regular function even if the original was a builtin,
     and inherits the original's visibility-related attributes.  */
  set_decl_built_in_function (new_node->decl, NOT_BUILT_IN, 0);
  TREE_PUBLIC (new_node->decl) = TREE_PUBLIC (old_node->decl);
  DECL_COMDAT (new_node->decl) = DECL_COMDAT (old_node->decl);
  DECL_WEAK (new_node->decl) = DECL_WEAK (old_node->decl);
  DECL_EXTERNAL (new_node->decl) = DECL_EXTERNAL (old_node->decl);
  DECL_VISIBILITY_SPECIFIED (new_node->decl)
    = DECL_VISIBILITY_SPECIFIED (old_node->decl);
  DECL_VISIBILITY (new_node->decl) = DECL_VISIBILITY (old_node->decl);
  DECL_DLLIMPORT_P (new_node->decl) = DECL_DLLIMPORT_P (old_node->decl);
  if (DECL_ONE_ONLY (old_node->decl))
    make_decl_one_only (new_node->decl, DECL_ASSEMBLER_NAME (new_node->decl));

  /* The method cgraph_version_clone_with_body () will force the new
     symbol local.  Undo this, and inherit external visibility from
     the old node.  */
  new_node->local = old_node->local;
  new_node->externally_visible = old_node->externally_visible;
  new_node->calls_declare_variant_alt = old_node->calls_declare_variant_alt;

  return new_node;
}

/* Adjust the return type of the given function to its appropriate
   vector counterpart.  Returns a simd array to be used throughout the
   function as a return value.  */

static tree
simd_clone_adjust_return_type (struct cgraph_node *node)
{
  tree fndecl = node->decl;
  tree orig_rettype = TREE_TYPE (TREE_TYPE (fndecl));
  unsigned int veclen;
  tree t;

  /* Adjust the function return type.  */
  if (orig_rettype == void_type_node)
    return NULL_TREE;
  t = TREE_TYPE (TREE_TYPE (fndecl));
  if (INTEGRAL_TYPE_P (t) || POINTER_TYPE_P (t))
    veclen = node->simdclone->vecsize_int;
  else
    veclen = node->simdclone->vecsize_float;
  /* veclen: number of scalar elements that fit in one hardware vector,
     capped by the requested simdlen.  */
  veclen /= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (t));
  if (veclen > node->simdclone->simdlen)
    veclen = node->simdclone->simdlen;
  if (POINTER_TYPE_P (t))
    t = pointer_sized_int_node;
  if (veclen == node->simdclone->simdlen)
    t = build_vector_type (t, node->simdclone->simdlen);
  else
    {
      /* simdlen exceeds one vector: return an array of vectors.  */
      t = build_vector_type (t, veclen);
      t = build_array_type_nelts (t, node->simdclone->simdlen / veclen);
    }
  TREE_TYPE (TREE_TYPE (fndecl)) = t;
  if (!node->definition)
    return NULL_TREE;

  t = DECL_RESULT (fndecl);
  /* Adjust the DECL_RESULT.  */
  gcc_assert (TREE_TYPE (t) != void_type_node);
  TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (fndecl));
  relayout_decl (t);

  tree atype = build_array_type_nelts (orig_rettype,
				       node->simdclone->simdlen);
  if (veclen != node->simdclone->simdlen)
    return build1 (VIEW_CONVERT_EXPR, atype, t);

  /* Set up a SIMD array to use as the return value.  */
  tree retval = create_tmp_var_raw (atype, "retval");
  gimple_add_tmp_var (retval);
  return retval;
}

/* Each vector argument has a corresponding array to be used locally
   as part of the eventual loop.  Create such temporary array and return
   it.  PREFIX is the prefix to be used for the temporary.
   TYPE is the inner element type.

   SIMDLEN is the number of elements.  */

static tree
create_tmp_simd_array (const char *prefix, tree type, int simdlen)
{
  tree atype = build_array_type_nelts (type, simdlen);
  tree avar = create_tmp_var_raw (atype, prefix);
  gimple_add_tmp_var (avar);
  return avar;
}

/* Modify the function argument types to their corresponding vector
   counterparts if appropriate.  Also, create one array for each simd
   argument to be used locally when using the function arguments as
   part of the loop.

   NODE is the function whose arguments are to be adjusted.

   If NODE does not represent function definition, returns NULL.
   Otherwise returns an adjustment class that will be filled describing
   how the argument declarations will be remapped.  New arguments which
   are not to be remapped are marked with USER_FLAG.  */

static ipa_param_body_adjustments *
simd_clone_adjust_argument_types (struct cgraph_node *node)
{
  auto_vec<tree> args;

  /* For definitions we work on PARM_DECLs; for declarations only on
     the formal parameter types.  */
  if (node->definition)
    push_function_arg_decls (&args, node->decl);
  else
    simd_clone_vector_of_formal_parm_types (&args, node->decl);
  struct cgraph_simd_clone *sc = node->simdclone;
  vec<ipa_adjusted_param, va_gc> *new_params = NULL;
  vec_safe_reserve (new_params, sc->nargs);
  unsigned i, j, veclen;
  for (i = 0; i < sc->nargs; ++i)
    {
      ipa_adjusted_param adj;
      memset (&adj, 0, sizeof (adj));
      tree parm = args[i];
      tree parm_type = node->definition ? TREE_TYPE (parm) : parm;
      adj.base_index = i;
      adj.prev_clone_index = i;

      sc->args[i].orig_arg = node->definition ? parm : NULL_TREE;
      sc->args[i].orig_type = parm_type;

      switch (sc->args[i].arg_type)
	{
	default:
	  /* No adjustment necessary for scalar arguments.  */
	  adj.op = IPA_PARAM_OP_COPY;
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
	case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
	  /* The parameter is copied unmodified, but its pointed-to
	     values get a local simd array in the definition.  */
	  if (node->definition)
	    sc->args[i].simd_array
	      = create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm)),
				       TREE_TYPE (parm_type),
				       sc->simdlen);
	  adj.op = IPA_PARAM_OP_COPY;
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	case SIMD_CLONE_ARG_TYPE_VECTOR:
	  /* Vectorizable argument: compute how many lanes fit in one
	     vector register, clamped to simdlen.  */
	  if (INTEGRAL_TYPE_P (parm_type) || POINTER_TYPE_P (parm_type))
	    veclen = sc->vecsize_int;
	  else
	    veclen = sc->vecsize_float;
	  veclen /= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (parm_type));
	  if (veclen > sc->simdlen)
	    veclen = sc->simdlen;
	  adj.op = IPA_PARAM_OP_NEW;
	  adj.param_prefix_index = IPA_PARAM_PREFIX_SIMD;
	  if (POINTER_TYPE_P (parm_type))
	    adj.type = build_vector_type (pointer_sized_int_node, veclen);
	  else
	    adj.type = build_vector_type (parm_type, veclen);
	  sc->args[i].vector_type = adj.type;
	  /* If simdlen needs several vectors, push one extra parameter
	     per additional vector; the extras are marked user_flag so
	     they are not remapped again later.  */
	  for (j = veclen; j < sc->simdlen; j += veclen)
	    {
	      vec_safe_push (new_params, adj);
	      if (j == veclen)
		{
		  memset (&adj, 0, sizeof (adj));
		  adj.op = IPA_PARAM_OP_NEW;
		  adj.user_flag = 1;
		  adj.param_prefix_index = IPA_PARAM_PREFIX_SIMD;
		  adj.base_index = i;
		  adj.prev_clone_index = i;
		  adj.type = sc->args[i].vector_type;
		}
	    }

	  if (node->definition)
	    sc->args[i].simd_array
	      = create_tmp_simd_array (DECL_NAME (parm)
				       ? IDENTIFIER_POINTER (DECL_NAME (parm))
				       : NULL, parm_type, sc->simdlen);
	}
      vec_safe_push (new_params, adj);
    }

  if (sc->inbranch)
    {
      /* Append the mask argument at the end of the parameter list.  */
      tree base_type = simd_clone_compute_base_data_type (sc->origin, sc);
      ipa_adjusted_param adj;
      memset (&adj, 0, sizeof (adj));
      adj.op = IPA_PARAM_OP_NEW;
      adj.user_flag = 1;
      adj.param_prefix_index = IPA_PARAM_PREFIX_MASK;

      adj.base_index = i;
      adj.prev_clone_index = i;
      if (INTEGRAL_TYPE_P (base_type) || POINTER_TYPE_P (base_type))
	veclen = sc->vecsize_int;
      else
	veclen = sc->vecsize_float;
      veclen /= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (base_type));
      if (veclen > sc->simdlen)
	veclen = sc->simdlen;
      /* Targets with an integer mask mode (e.g. AVX512) get a scalar
	 mask of that mode rather than a vector.  */
      if (sc->mask_mode != VOIDmode)
	adj.type = lang_hooks.types.type_for_mode (sc->mask_mode, 1);
      else if (POINTER_TYPE_P (base_type))
	adj.type = build_vector_type (pointer_sized_int_node, veclen);
      else
	adj.type = build_vector_type (base_type, veclen);
      vec_safe_push (new_params, adj);

      for (j = veclen; j < sc->simdlen; j += veclen)
	vec_safe_push (new_params, adj);

      /* We have previously allocated one extra entry for the mask.  Use
	 it and fill it.  */
      sc->nargs++;
      if (sc->mask_mode != VOIDmode)
	base_type = boolean_type_node;
      if (node->definition)
	{
	  sc->args[i].orig_arg
	    = build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL, base_type);
	  if (sc->mask_mode == VOIDmode)
	    sc->args[i].simd_array
	      = create_tmp_simd_array ("mask", base_type, sc->simdlen);
	  else if (veclen < sc->simdlen)
	    sc->args[i].simd_array
	      = create_tmp_simd_array ("mask", adj.type,
				       sc->simdlen / veclen);
	  else
	    sc->args[i].simd_array = NULL_TREE;
	}
      sc->args[i].orig_type = base_type;
      sc->args[i].arg_type = SIMD_CLONE_ARG_TYPE_MASK;
    }

  if (node->definition)
    {
      /* Let the IPA param machinery rewrite the PARM_DECLs.  */
      ipa_param_body_adjustments *adjustments
	= new ipa_param_body_adjustments (new_params, node->decl);

      adjustments->modify_formal_parameters ();
      return adjustments;
    }
  else
    {
      /* For declarations only rebuild TYPE_ARG_TYPES by hand.  */
      tree new_arg_types = NULL_TREE, new_reversed;
      bool last_parm_void = false;
      if (args.length () > 0 && args.last () == void_type_node)
	last_parm_void = true;

      gcc_assert (TYPE_ARG_TYPES (TREE_TYPE (node->decl)));
      j = vec_safe_length (new_params);
      for (i = 0; i < j; i++)
	{
	  struct ipa_adjusted_param *adj = &(*new_params)[i];
	  tree ptype;
	  if (adj->op == IPA_PARAM_OP_COPY)
	    ptype = args[adj->base_index];
	  else
	    ptype = adj->type;
	  new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
	}
      new_reversed = nreverse (new_arg_types);
      if (last_parm_void)
	{
	  if (new_reversed)
	    TREE_CHAIN (new_arg_types) = void_list_node;
	  else
	    new_reversed = void_list_node;
	}
      TYPE_ARG_TYPES (TREE_TYPE (node->decl)) = new_reversed;
      return NULL;
    }
}

/* Initialize and copy the function arguments in NODE to their
   corresponding local simd arrays.  Returns a fresh gimple_seq with
   the instruction sequence generated.
*/

static gimple_seq
simd_clone_init_simd_arrays (struct cgraph_node *node,
			     ipa_param_body_adjustments *adjustments)
{
  gimple_seq seq = NULL;
  unsigned i = 0, j = 0, k;

  /* I indexes the clone-info args; J indexes the (possibly longer)
     adjusted parameter vector.  */
  for (tree arg = DECL_ARGUMENTS (node->decl);
       arg;
       arg = DECL_CHAIN (arg), i++, j++)
    {
      if ((*adjustments->m_adj_params)[j].op == IPA_PARAM_OP_COPY
	  || POINTER_TYPE_P (TREE_TYPE (arg)))
	continue;

      node->simdclone->args[i].vector_arg = arg;

      tree array = node->simdclone->args[i].simd_array;
      if (node->simdclone->mask_mode != VOIDmode
	  && node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_MASK)
	{
	  /* Integer-mode masks: store each mask parameter into its
	     slot of the mask array (one slot per incoming mask arg).  */
	  if (array == NULL_TREE)
	    continue;
	  unsigned int l
	    = tree_to_uhwi (TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (array))));
	  for (k = 0; k <= l; k++)
	    {
	      if (k)
		{
		  arg = DECL_CHAIN (arg);
		  j++;
		}
	      tree t = build4 (ARRAY_REF, TREE_TYPE (TREE_TYPE (array)),
			       array, size_int (k), NULL, NULL);
	      t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
	      gimplify_and_add (t, &seq);
	    }
	  continue;
	}
      if (simd_clone_subparts (TREE_TYPE (arg))
	  == node->simdclone->simdlen)
	{
	  /* One vector argument covers all of simdlen: store it into
	     the array with a single MEM_REF store.  */
	  tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
	  tree ptr = build_fold_addr_expr (array);
	  tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
			   build_int_cst (ptype, 0));
	  t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
	  gimplify_and_add (t, &seq);
	}
      else
	{
	  /* simdlen was split over several vector arguments: store
	     each chunk at its byte offset within the array.  */
	  unsigned int simdlen = simd_clone_subparts (TREE_TYPE (arg));
	  tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
	  for (k = 0; k < node->simdclone->simdlen; k += simdlen)
	    {
	      tree ptr = build_fold_addr_expr (array);
	      int elemsize;
	      if (k)
		{
		  arg = DECL_CHAIN (arg);
		  j++;
		}
	      tree elemtype = TREE_TYPE (TREE_TYPE (arg));
	      elemsize = GET_MODE_SIZE (SCALAR_TYPE_MODE (elemtype));
	      tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
			       build_int_cst (ptype, k * elemsize));
	      t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
	      gimplify_and_add (t, &seq);
	    }
	}
    }
  return seq;
}

/* Callback info for ipa_simd_modify_stmt_ops below.  */

struct modify_stmt_info {
  ipa_param_body_adjustments *adjustments;
  /* Statement currently being rewritten.  */
  gimple *stmt;
  /* Last new statement emitted for a PHI, so follow-up insertions for
     the same PHI go after it.  */
  gimple *after_stmt;
  /* True if the parent statement was modified by
     ipa_simd_modify_stmt_ops.  */
  bool modified;
};

/* Callback for walk_gimple_op.

   Adjust operands from a given statement as specified in the
   adjustments vector in the callback data.  */

static tree
ipa_simd_modify_stmt_ops (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct modify_stmt_info *info = (struct modify_stmt_info *) wi->info;
  tree *orig_tp = tp;
  /* Look through an ADDR_EXPR and component refs to find a possibly
     replaced PARM_DECL underneath.  */
  if (TREE_CODE (*tp) == ADDR_EXPR)
    tp = &TREE_OPERAND (*tp, 0);

  if (TREE_CODE (*tp) == BIT_FIELD_REF
      || TREE_CODE (*tp) == IMAGPART_EXPR
      || TREE_CODE (*tp) == REALPART_EXPR)
    tp = &TREE_OPERAND (*tp, 0);
  tree repl = NULL_TREE;
  ipa_param_body_replacement *pbr = NULL;

  if (TREE_CODE (*tp) == PARM_DECL)
    {
      pbr = info->adjustments->get_expr_replacement (*tp, true);
      if (pbr)
	repl = pbr->repl;
    }
  else if (TYPE_P (*tp))
    *walk_subtrees = 0;

  if (repl)
    repl = unshare_expr (repl);
  else
    {
      if (tp != orig_tp)
	{
	  /* We looked through an ADDR_EXPR; walk its operand and only
	     continue if something below was actually replaced.  */
	  *walk_subtrees = 0;
	  bool modified = info->modified;
	  info->modified = false;
	  walk_tree (tp, ipa_simd_modify_stmt_ops, wi, wi->pset);
	  if (!info->modified)
	    {
	      info->modified = modified;
	      return NULL_TREE;
	    }
	  info->modified = modified;
	  repl = *tp;
	}
      else
	return NULL_TREE;
    }

  if (tp != orig_tp)
    {
      /* The replaced operand sat under an ADDR_EXPR: materialize its
	 address in a new SSA name inserted before the statement.  */
      if (gimple_code (info->stmt) == GIMPLE_PHI
	  && pbr
	  && TREE_CODE (*orig_tp) == ADDR_EXPR
	  && TREE_CODE (TREE_OPERAND (*orig_tp, 0)) == PARM_DECL
	  && pbr->dummy)
	{
	  /* Reuse the SSA_NAME cached for this PHI earlier.  */
	  gcc_assert (TREE_CODE (pbr->dummy) == SSA_NAME);
	  *orig_tp = pbr->dummy;
	  info->modified = true;
	  return NULL_TREE;
	}

      repl = build_fold_addr_expr (repl);
      gimple *stmt;
      if (is_gimple_debug (info->stmt))
	{
	  tree vexpr = make_node (DEBUG_EXPR_DECL);
	  stmt = gimple_build_debug_source_bind (vexpr, repl, NULL);
	  DECL_ARTIFICIAL (vexpr) = 1;
	  TREE_TYPE (vexpr) = TREE_TYPE (repl);
	  SET_DECL_MODE (vexpr, TYPE_MODE (TREE_TYPE (repl)));
	  repl = vexpr;
	}
      else
	{
	  stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (repl)), repl);
	  repl = gimple_assign_lhs (stmt);
	}
      gimple_stmt_iterator gsi;
      if (gimple_code (info->stmt) == GIMPLE_PHI)
	{
	  /* PHIs cannot have the new assignment inserted before them;
	     put it after labels of the entry successor block.  */
	  if (info->after_stmt)
	    gsi = gsi_for_stmt (info->after_stmt);
	  else
	    gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
	  /* Cache SSA_NAME for next time.  */
	  if (pbr
	      && TREE_CODE (*orig_tp) == ADDR_EXPR
	      && TREE_CODE (TREE_OPERAND (*orig_tp, 0)) == PARM_DECL)
	    {
	      gcc_assert (!pbr->dummy);
	      pbr->dummy = repl;
	    }
	}
      else
	gsi = gsi_for_stmt (info->stmt);
      if (info->after_stmt)
	gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
      else
	gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
      if (gimple_code (info->stmt) == GIMPLE_PHI)
	info->after_stmt = stmt;
      *orig_tp = repl;
    }
  else if (!useless_type_conversion_p (TREE_TYPE (*tp), TREE_TYPE (repl)))
    {
      /* Type mismatch between the use and its replacement: wrap in a
	 VIEW_CONVERT_EXPR.  */
      tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*tp), repl);
      *tp = vce;
    }
  else
    *tp = repl;
  info->modified = true;
  return NULL_TREE;
}

/* Traverse the function body and perform all modifications as
   described in ADJUSTMENTS.  At function return, ADJUSTMENTS will be
   modified such that the replacement/reduction value will now be an
   offset into the corresponding simd_array.

   This function will replace all function argument uses with their
   corresponding simd array elements, and adjust the return values
   accordingly.  */

static void
ipa_simd_modify_function_body (struct cgraph_node *node,
			       ipa_param_body_adjustments *adjustments,
			       tree retval_array, tree iter)
{
  basic_block bb;
  unsigned int i, j;

  /* Register replacements for every function argument use to an offset into
     the corresponding simd_array.  */
  for (i = 0, j = 0; i < node->simdclone->nargs; ++i, ++j)
    {
      if (!node->simdclone->args[i].vector_arg
	  || (*adjustments->m_adj_params)[j].user_flag)
	continue;

      tree basetype = TREE_TYPE (node->simdclone->args[i].orig_arg);
      tree vectype = TREE_TYPE (node->simdclone->args[i].vector_arg);
      tree r = build4 (ARRAY_REF, basetype, node->simdclone->args[i].simd_array,
		       iter, NULL_TREE, NULL_TREE);
      adjustments->register_replacement (&(*adjustments->m_adj_params)[j], r);
      /* Skip the extra parameters that were added when simdlen was
	 split across several vectors.  */
      if (simd_clone_subparts (vectype) < node->simdclone->simdlen)
	j += node->simdclone->simdlen / simd_clone_subparts (vectype) - 1;
    }

  tree name;
  FOR_EACH_SSA_NAME (i, name, cfun)
    {
      tree base_var;
      if (SSA_NAME_VAR (name)
	  && TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL
	  && (base_var
	      = adjustments->get_replacement_ssa_base (SSA_NAME_VAR (name))))
	{
	  if (SSA_NAME_IS_DEFAULT_DEF (name))
	    {
	      /* Turn the default def of a replaced parameter into an
		 explicit load from the simd array at function entry.  */
	      tree old_decl = SSA_NAME_VAR (name);
	      bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
	      gimple_stmt_iterator gsi = gsi_after_labels (bb);
	      tree repl = adjustments->lookup_replacement (old_decl, 0);
	      gcc_checking_assert (repl);
	      repl = unshare_expr (repl);
	      set_ssa_default_def (cfun, old_decl, NULL_TREE);
	      SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
	      SSA_NAME_IS_DEFAULT_DEF (name) = 0;
	      gimple *stmt = gimple_build_assign (name, repl);
	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	    }
	  else
	    SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
	}
    }

  struct modify_stmt_info info;
  info.adjustments = adjustments;

  FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
    {
      gimple_stmt_iterator gsi;

      /* Rewrite PHI arguments first; walk_gimple_op cannot be used on
	 PHIs, so each argument is processed through the callback
	 directly.  */
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gphi *phi = as_a <gphi *> (gsi_stmt (gsi));
	  int i, n = gimple_phi_num_args (phi);
	  info.stmt = phi;
	  info.after_stmt = NULL;
	  struct walk_stmt_info wi;
	  memset (&wi, 0, sizeof (wi));
	  info.modified = false;
	  wi.info = &info;
	  for (i = 0; i < n; ++i)
	    {
	      int walk_subtrees = 1;
	      tree arg = gimple_phi_arg_def (phi, i);
	      tree op = arg;
	      ipa_simd_modify_stmt_ops (&op, &walk_subtrees, &wi);
	      if (op != arg)
		{
		  SET_PHI_ARG_DEF (phi, i, op);
		  gcc_assert (TREE_CODE (op) == SSA_NAME);
		  if (gimple_phi_arg_edge (phi, i)->flags & EDGE_ABNORMAL)
		    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op) = 1;
		}
	    }
	}

      gsi = gsi_start_bb (bb);
      while (!gsi_end_p (gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  info.stmt = stmt;
	  info.after_stmt = NULL;
	  struct walk_stmt_info wi;

	  memset (&wi, 0, sizeof (wi));
	  info.modified = false;
	  wi.info = &info;
	  walk_gimple_op (stmt, ipa_simd_modify_stmt_ops, &wi);

	  if (greturn *return_stmt = dyn_cast <greturn *> (stmt))
	    {
	      tree retval = gimple_return_retval (return_stmt);
	      edge e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
	      e->flags |= EDGE_FALLTHRU;
	      if (!retval)
		{
		  gsi_remove (&gsi, true);
		  continue;
		}

	      /* Replace `return foo' with `retval_array[iter] = foo'.  */
	      tree ref = build4 (ARRAY_REF, TREE_TYPE (retval),
				 retval_array, iter, NULL, NULL);
	      stmt = gimple_build_assign (ref, retval);
	      gsi_replace (&gsi, stmt, true);
	      info.modified = true;
	    }

	  if (info.modified)
	    {
	      update_stmt (stmt);
	      /* If the above changed the var of a debug bind into something
		 different, remove the debug stmt.  We could also for all the
		 replaced parameters add VAR_DECLs for debug info purposes,
		 add debug stmts for those to be the simd array accesses and
		 replace debug stmt var operand with that var.  Debugging of
		 vectorized loops doesn't work too well, so don't bother for
		 now.  */
	      if ((gimple_debug_bind_p (stmt)
		   && !DECL_P (gimple_debug_bind_get_var (stmt)))
		  || (gimple_debug_source_bind_p (stmt)
		      && !DECL_P (gimple_debug_source_bind_get_var (stmt))))
		{
		  gsi_remove (&gsi, true);
		  continue;
		}
	      if (maybe_clean_eh_stmt (stmt))
		gimple_purge_dead_eh_edges (gimple_bb (stmt));
	    }
	  gsi_next (&gsi);
	}
    }
}

/* Helper function of simd_clone_adjust, return linear step addend
   of Ith argument.
*/ static tree simd_clone_linear_addend (struct cgraph_node *node, unsigned int i, tree addtype, basic_block entry_bb) { tree ptype = NULL_TREE; switch (node->simdclone->args[i].arg_type) { case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP: case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP: case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP: case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP: return build_int_cst (addtype, node->simdclone->args[i].linear_step); case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP: case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP: ptype = TREE_TYPE (node->simdclone->args[i].orig_arg); break; case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP: case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP: ptype = TREE_TYPE (TREE_TYPE (node->simdclone->args[i].orig_arg)); break; default: gcc_unreachable (); } unsigned int idx = node->simdclone->args[i].linear_step; tree arg = node->simdclone->args[idx].orig_arg; gcc_assert (is_gimple_reg_type (TREE_TYPE (arg))); gimple_stmt_iterator gsi = gsi_after_labels (entry_bb); gimple *g; tree ret; if (is_gimple_reg (arg)) ret = get_or_create_ssa_default_def (cfun, arg); else { g = gimple_build_assign (make_ssa_name (TREE_TYPE (arg)), arg); gsi_insert_before (&gsi, g, GSI_SAME_STMT); ret = gimple_assign_lhs (g); } if (TREE_CODE (TREE_TYPE (arg)) == REFERENCE_TYPE) { g = gimple_build_assign (make_ssa_name (TREE_TYPE (TREE_TYPE (arg))), build_simple_mem_ref (ret)); gsi_insert_before (&gsi, g, GSI_SAME_STMT); ret = gimple_assign_lhs (g); } if (!useless_type_conversion_p (addtype, TREE_TYPE (ret))) { g = gimple_build_assign (make_ssa_name (addtype), NOP_EXPR, ret); gsi_insert_before (&gsi, g, GSI_SAME_STMT); ret = gimple_assign_lhs (g); } if (POINTER_TYPE_P (ptype)) { tree size = TYPE_SIZE_UNIT (TREE_TYPE (ptype)); if (size && TREE_CODE (size) == INTEGER_CST) { g = gimple_build_assign (make_ssa_name (addtype), MULT_EXPR, ret, fold_convert (addtype, size)); gsi_insert_before (&gsi, g, GSI_SAME_STMT); ret = 
gimple_assign_lhs (g); } } return ret; } /* Adjust the argument types in NODE to their appropriate vector counterparts. */ static void simd_clone_adjust (struct cgraph_node *node) { push_cfun (DECL_STRUCT_FUNCTION (node->decl)); TREE_TYPE (node->decl) = build_distinct_type_copy (TREE_TYPE (node->decl)); targetm.simd_clone.adjust (node); tree retval = simd_clone_adjust_return_type (node); ipa_param_body_adjustments *adjustments = simd_clone_adjust_argument_types (node); gcc_assert (adjustments); push_gimplify_context (); gimple_seq seq = simd_clone_init_simd_arrays (node, adjustments); /* Adjust all uses of vector arguments accordingly. Adjust all return values accordingly. */ tree iter = create_tmp_var (unsigned_type_node, "iter"); tree iter1 = make_ssa_name (iter); tree iter2 = NULL_TREE; ipa_simd_modify_function_body (node, adjustments, retval, iter1); delete adjustments; /* Initialize the iteration variable. */ basic_block entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)); basic_block body_bb = split_block_after_labels (entry_bb)->dest; gimple_stmt_iterator gsi = gsi_after_labels (entry_bb); /* Insert the SIMD array and iv initialization at function entry. */ gsi_insert_seq_before (&gsi, seq, GSI_NEW_STMT); pop_gimplify_context (NULL); gimple *g; basic_block incr_bb = NULL; class loop *loop = NULL; /* Create a new BB right before the original exit BB, to hold the iteration increment and the condition/branch. 
*/ if (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)) { basic_block orig_exit = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0)->src; incr_bb = create_empty_bb (orig_exit); incr_bb->count = profile_count::zero (); add_bb_to_loop (incr_bb, body_bb->loop_father); while (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)) { edge e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0); redirect_edge_succ (e, incr_bb); incr_bb->count += e->count (); } } else if (node->simdclone->inbranch) { incr_bb = create_empty_bb (entry_bb); incr_bb->count = profile_count::zero (); add_bb_to_loop (incr_bb, body_bb->loop_father); } if (incr_bb) { make_single_succ_edge (incr_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0); gsi = gsi_last_bb (incr_bb); iter2 = make_ssa_name (iter); g = gimple_build_assign (iter2, PLUS_EXPR, iter1, build_int_cst (unsigned_type_node, 1)); gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING); /* Mostly annotate the loop for the vectorizer (the rest is done below). */ loop = alloc_loop (); cfun->has_force_vectorize_loops = true; loop->safelen = node->simdclone->simdlen; loop->force_vectorize = true; loop->header = body_bb; } /* Branch around the body if the mask applies. 
*/ if (node->simdclone->inbranch) { gsi = gsi_last_bb (loop->header); tree mask_array = node->simdclone->args[node->simdclone->nargs - 1].simd_array; tree mask; if (node->simdclone->mask_mode != VOIDmode) { tree shift_cnt; if (mask_array == NULL_TREE) { tree arg = node->simdclone->args[node->simdclone->nargs - 1].vector_arg; mask = get_or_create_ssa_default_def (cfun, arg); shift_cnt = iter1; } else { tree maskt = TREE_TYPE (mask_array); int c = tree_to_uhwi (TYPE_MAX_VALUE (TYPE_DOMAIN (maskt))); c = node->simdclone->simdlen / (c + 1); int s = exact_log2 (c); gcc_assert (s > 0); c--; tree idx = make_ssa_name (TREE_TYPE (iter1)); g = gimple_build_assign (idx, RSHIFT_EXPR, iter1, build_int_cst (NULL_TREE, s)); gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING); mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array))); tree aref = build4 (ARRAY_REF, TREE_TYPE (TREE_TYPE (mask_array)), mask_array, idx, NULL, NULL); g = gimple_build_assign (mask, aref); gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING); shift_cnt = make_ssa_name (TREE_TYPE (iter1)); g = gimple_build_assign (shift_cnt, BIT_AND_EXPR, iter1, build_int_cst (TREE_TYPE (iter1), c)); gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING); } g = gimple_build_assign (make_ssa_name (TREE_TYPE (mask)), RSHIFT_EXPR, mask, shift_cnt); gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING); mask = gimple_assign_lhs (g); g = gimple_build_assign (make_ssa_name (TREE_TYPE (mask)), BIT_AND_EXPR, mask, build_int_cst (TREE_TYPE (mask), 1)); gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING); mask = gimple_assign_lhs (g); } else { mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array))); tree aref = build4 (ARRAY_REF, TREE_TYPE (TREE_TYPE (mask_array)), mask_array, iter1, NULL, NULL); g = gimple_build_assign (mask, aref); gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING); int bitsize = GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (aref))); if (!INTEGRAL_TYPE_P (TREE_TYPE (aref))) { aref = build1 (VIEW_CONVERT_EXPR, 
build_nonstandard_integer_type (bitsize, 0), mask); mask = make_ssa_name (TREE_TYPE (aref)); g = gimple_build_assign (mask, aref); gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING); } } g = gimple_build_cond (EQ_EXPR, mask, build_zero_cst (TREE_TYPE (mask)), NULL, NULL); gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING); edge e = make_edge (loop->header, incr_bb, EDGE_TRUE_VALUE); e->probability = profile_probability::unlikely ().guessed (); incr_bb->count += e->count (); edge fallthru = FALLTHRU_EDGE (loop->header); fallthru->flags = EDGE_FALSE_VALUE; fallthru->probability = profile_probability::likely ().guessed (); } basic_block latch_bb = NULL; basic_block new_exit_bb = NULL; /* Generate the condition. */ if (incr_bb) { gsi = gsi_last_bb (incr_bb); g = gimple_build_cond (LT_EXPR, iter2, build_int_cst (unsigned_type_node, node->simdclone->simdlen), NULL, NULL); gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING); edge e = split_block (incr_bb, gsi_stmt (gsi)); latch_bb = e->dest; new_exit_bb = split_block_after_labels (latch_bb)->dest; loop->latch = latch_bb; redirect_edge_succ (FALLTHRU_EDGE (latch_bb), body_bb); edge new_e = make_edge (incr_bb, new_exit_bb, EDGE_FALSE_VALUE); /* FIXME: Do we need to distribute probabilities for the conditional? */ new_e->probability = profile_probability::guessed_never (); /* The successor of incr_bb is already pointing to latch_bb; just change the flags. make_edge (incr_bb, latch_bb, EDGE_TRUE_VALUE); */ FALLTHRU_EDGE (incr_bb)->flags = EDGE_TRUE_VALUE; } gphi *phi = create_phi_node (iter1, body_bb); edge preheader_edge = find_edge (entry_bb, body_bb); edge latch_edge = NULL; add_phi_arg (phi, build_zero_cst (unsigned_type_node), preheader_edge, UNKNOWN_LOCATION); if (incr_bb) { latch_edge = single_succ_edge (latch_bb); add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION); /* Generate the new return. 
*/ gsi = gsi_last_bb (new_exit_bb); if (retval && TREE_CODE (retval) == VIEW_CONVERT_EXPR && TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL) retval = TREE_OPERAND (retval, 0); else if (retval) { retval = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (TREE_TYPE (node->decl)), retval); retval = force_gimple_operand_gsi (&gsi, retval, true, NULL, false, GSI_CONTINUE_LINKING); } g = gimple_build_return (retval); gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING); } /* Handle aligned clauses by replacing default defs of the aligned uniform args with __builtin_assume_aligned (arg_N(D), alignment) lhs. Handle linear by adding PHIs. */ for (unsigned i = 0; i < node->simdclone->nargs; i++) if (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM && (TREE_ADDRESSABLE (node->simdclone->args[i].orig_arg) || !is_gimple_reg_type (TREE_TYPE (node->simdclone->args[i].orig_arg)))) { tree orig_arg = node->simdclone->args[i].orig_arg; if (is_gimple_reg_type (TREE_TYPE (orig_arg))) iter1 = make_ssa_name (TREE_TYPE (orig_arg)); else { iter1 = create_tmp_var_raw (TREE_TYPE (orig_arg)); gimple_add_tmp_var (iter1); } gsi = gsi_after_labels (entry_bb); g = gimple_build_assign (iter1, orig_arg); gsi_insert_before (&gsi, g, GSI_NEW_STMT); gsi = gsi_after_labels (body_bb); g = gimple_build_assign (orig_arg, iter1); gsi_insert_before (&gsi, g, GSI_NEW_STMT); } else if (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM && DECL_BY_REFERENCE (node->simdclone->args[i].orig_arg) && TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg)) == REFERENCE_TYPE && TREE_ADDRESSABLE (TREE_TYPE (TREE_TYPE (node->simdclone->args[i].orig_arg)))) { tree orig_arg = node->simdclone->args[i].orig_arg; tree def = ssa_default_def (cfun, orig_arg); if (def && !has_zero_uses (def)) { iter1 = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (orig_arg))); gimple_add_tmp_var (iter1); gsi = gsi_after_labels (entry_bb); g = gimple_build_assign (iter1, build_simple_mem_ref (def)); gsi_insert_before (&gsi, g, 
GSI_NEW_STMT); gsi = gsi_after_labels (body_bb); g = gimple_build_assign (build_simple_mem_ref (def), iter1); gsi_insert_before (&gsi, g, GSI_NEW_STMT); } } else if (node->simdclone->args[i].alignment && node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM && (node->simdclone->args[i].alignment & (node->simdclone->args[i].alignment - 1)) == 0 && TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg)) == POINTER_TYPE) { unsigned int alignment = node->simdclone->args[i].alignment; tree orig_arg = node->simdclone->args[i].orig_arg; tree def = ssa_default_def (cfun, orig_arg); if (def && !has_zero_uses (def)) { tree fn = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED); gimple_seq seq = NULL; bool need_cvt = false; gcall *call = gimple_build_call (fn, 2, def, size_int (alignment)); g = call; if (!useless_type_conversion_p (TREE_TYPE (orig_arg), ptr_type_node)) need_cvt = true; tree t = make_ssa_name (need_cvt ? ptr_type_node : orig_arg); gimple_call_set_lhs (g, t); gimple_seq_add_stmt_without_update (&seq, g); if (need_cvt) { t = make_ssa_name (orig_arg); g = gimple_build_assign (t, NOP_EXPR, gimple_call_lhs (g)); gimple_seq_add_stmt_without_update (&seq, g); } gsi_insert_seq_on_edge_immediate (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)), seq); entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)); node->create_edge (cgraph_node::get_create (fn), call, entry_bb->count); imm_use_iterator iter; use_operand_p use_p; gimple *use_stmt; tree repl = gimple_get_lhs (g); FOR_EACH_IMM_USE_STMT (use_stmt, iter, def) if (is_gimple_debug (use_stmt) || use_stmt == call) continue; else FOR_EACH_IMM_USE_ON_STMT (use_p, iter) SET_USE (use_p, repl); } } else if ((node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP) || (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP) || (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP) || (node->simdclone->args[i].arg_type == 
SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP)) { tree orig_arg = node->simdclone->args[i].orig_arg; gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (orig_arg)) || POINTER_TYPE_P (TREE_TYPE (orig_arg))); tree def = NULL_TREE; if (TREE_ADDRESSABLE (orig_arg)) { def = make_ssa_name (TREE_TYPE (orig_arg)); iter1 = make_ssa_name (TREE_TYPE (orig_arg)); if (incr_bb) iter2 = make_ssa_name (TREE_TYPE (orig_arg)); gsi = gsi_after_labels (entry_bb); g = gimple_build_assign (def, orig_arg); gsi_insert_before (&gsi, g, GSI_NEW_STMT); } else { def = ssa_default_def (cfun, orig_arg); if (!def || has_zero_uses (def)) def = NULL_TREE; else { iter1 = make_ssa_name (orig_arg); if (incr_bb) iter2 = make_ssa_name (orig_arg); } } if (def) { phi = create_phi_node (iter1, body_bb); add_phi_arg (phi, def, preheader_edge, UNKNOWN_LOCATION); if (incr_bb) { add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION); enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg)) ? PLUS_EXPR : POINTER_PLUS_EXPR; tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg)) ? 
TREE_TYPE (orig_arg) : sizetype; tree addcst = simd_clone_linear_addend (node, i, addtype, entry_bb); gsi = gsi_last_bb (incr_bb); g = gimple_build_assign (iter2, code, iter1, addcst); gsi_insert_before (&gsi, g, GSI_SAME_STMT); } imm_use_iterator iter; use_operand_p use_p; gimple *use_stmt; if (TREE_ADDRESSABLE (orig_arg)) { gsi = gsi_after_labels (body_bb); g = gimple_build_assign (orig_arg, iter1); gsi_insert_before (&gsi, g, GSI_NEW_STMT); } else FOR_EACH_IMM_USE_STMT (use_stmt, iter, def) if (use_stmt == phi) continue; else FOR_EACH_IMM_USE_ON_STMT (use_p, iter) SET_USE (use_p, iter1); } } else if (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP || (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP)) { tree orig_arg = node->simdclone->args[i].orig_arg; tree def = ssa_default_def (cfun, orig_arg); gcc_assert (!TREE_ADDRESSABLE (orig_arg) && TREE_CODE (TREE_TYPE (orig_arg)) == REFERENCE_TYPE); if (def && !has_zero_uses (def)) { tree rtype = TREE_TYPE (TREE_TYPE (orig_arg)); iter1 = make_ssa_name (orig_arg); if (incr_bb) iter2 = make_ssa_name (orig_arg); tree iter3 = make_ssa_name (rtype); tree iter4 = make_ssa_name (rtype); tree iter5 = incr_bb ? 
make_ssa_name (rtype) : NULL_TREE; gsi = gsi_after_labels (entry_bb); gimple *load = gimple_build_assign (iter3, build_simple_mem_ref (def)); gsi_insert_before (&gsi, load, GSI_NEW_STMT); tree array = node->simdclone->args[i].simd_array; TREE_ADDRESSABLE (array) = 1; tree ptr = build_fold_addr_expr (array); phi = create_phi_node (iter1, body_bb); add_phi_arg (phi, ptr, preheader_edge, UNKNOWN_LOCATION); if (incr_bb) { add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION); g = gimple_build_assign (iter2, POINTER_PLUS_EXPR, iter1, TYPE_SIZE_UNIT (TREE_TYPE (iter3))); gsi = gsi_last_bb (incr_bb); gsi_insert_before (&gsi, g, GSI_SAME_STMT); } phi = create_phi_node (iter4, body_bb); add_phi_arg (phi, iter3, preheader_edge, UNKNOWN_LOCATION); if (incr_bb) { add_phi_arg (phi, iter5, latch_edge, UNKNOWN_LOCATION); enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (iter3)) ? PLUS_EXPR : POINTER_PLUS_EXPR; tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (iter3)) ? TREE_TYPE (iter3) : sizetype; tree addcst = simd_clone_linear_addend (node, i, addtype, entry_bb); g = gimple_build_assign (iter5, code, iter4, addcst); gsi = gsi_last_bb (incr_bb); gsi_insert_before (&gsi, g, GSI_SAME_STMT); } g = gimple_build_assign (build_simple_mem_ref (iter1), iter4); gsi = gsi_after_labels (body_bb); gsi_insert_before (&gsi, g, GSI_SAME_STMT); imm_use_iterator iter; use_operand_p use_p; gimple *use_stmt; FOR_EACH_IMM_USE_STMT (use_stmt, iter, def) if (use_stmt == load) continue; else FOR_EACH_IMM_USE_ON_STMT (use_p, iter) SET_USE (use_p, iter1); if (!TYPE_READONLY (rtype) && incr_bb) { tree v = make_ssa_name (rtype); tree aref = build4 (ARRAY_REF, rtype, array, size_zero_node, NULL_TREE, NULL_TREE); gsi = gsi_after_labels (new_exit_bb); g = gimple_build_assign (v, aref); gsi_insert_before (&gsi, g, GSI_SAME_STMT); g = gimple_build_assign (build_simple_mem_ref (def), v); gsi_insert_before (&gsi, g, GSI_SAME_STMT); } } } calculate_dominance_info (CDI_DOMINATORS); if (loop) add_loop (loop, 
loop->header->loop_father);
  update_ssa (TODO_update_ssa);
  pop_cfun ();
}

/* If the function in NODE is tagged as an elemental SIMD function,
   create the appropriate SIMD clones.  */

void
expand_simd_clones (struct cgraph_node *node)
{
  tree attr = lookup_attribute ("omp declare simd",
				DECL_ATTRIBUTES (node->decl));
  /* Nothing to do without the attribute, for inline copies, or when
     cloning has been explicitly disabled.  */
  if (attr == NULL_TREE
      || node->inlined_to
      || lookup_attribute ("noclone", DECL_ATTRIBUTES (node->decl)))
    return;
  /* Ignore
     #pragma omp declare simd
     extern int foo ();
     in C, there we don't know the argument types at all.  */
  if (!node->definition
      && TYPE_ARG_TYPES (TREE_TYPE (node->decl)) == NULL_TREE)
    return;

  /* Call this before creating clone_info, as it might ggc_collect.  */
  if (node->definition && node->has_gimple_body_p ())
    node->get_body ();

  /* One iteration per "omp declare simd" attribute on the decl.  */
  do
    {
      /* Start with parsing the "omp declare simd" attribute(s).  */
      bool inbranch_clause_specified;
      struct cgraph_simd_clone *clone_info
	= simd_clone_clauses_extract (node, TREE_VALUE (attr),
				      &inbranch_clause_specified);
      if (clone_info == NULL)
	continue;

      int orig_simdlen = clone_info->simdlen;
      tree base_type = simd_clone_compute_base_data_type (node, clone_info);
      /* The target can return 0 (no simd clones should be created),
	 1 (just one ISA of simd clones should be created) or higher
	 count of ISA variants.  In that case, clone_info is initialized
	 for the first ISA variant.  */
      int count
	= targetm.simd_clone.compute_vecsize_and_simdlen (node, clone_info,
							  base_type, 0);
      if (count == 0)
	continue;

      /* Loop over all COUNT ISA variants, and if !INBRANCH_CLAUSE_SPECIFIED,
	 also create one inbranch and one !inbranch clone of it.  */
      for (int i = 0; i < count * 2; i++)
	{
	  struct cgraph_simd_clone *clone = clone_info;
	  if (inbranch_clause_specified && (i & 1) != 0)
	    continue;

	  if (i != 0)
	    {
	      /* Odd i => the inbranch variant, which carries one extra
		 (mask) argument.  */
	      clone = simd_clone_struct_alloc (clone_info->nargs
					       + ((i & 1) != 0));
	      simd_clone_struct_copy (clone, clone_info);
	      /* Undo changes targetm.simd_clone.compute_vecsize_and_simdlen
		 and simd_clone_adjust_argument_types did to the first
		 clone's info.  */
	      clone->nargs -= clone_info->inbranch;
	      clone->simdlen = orig_simdlen;
	      /* And call the target hook again to get the right ISA.  */
	      targetm.simd_clone.compute_vecsize_and_simdlen (node, clone,
							      base_type,
							      i / 2);
	      if ((i & 1) != 0)
		clone->inbranch = 1;
	    }

	  /* simd_clone_mangle might fail if such a clone has been
	     created already.  */
	  tree id = simd_clone_mangle (node, clone);
	  if (id == NULL_TREE)
	    {
	      if (i == 0)
		clone->nargs += clone->inbranch;
	      continue;
	    }

	  /* Only when we are sure we want to create the clone actually
	     clone the function (or definitions) or create another
	     extern FUNCTION_DECL (for prototypes without definitions).  */
	  struct cgraph_node *n = simd_clone_create (node);
	  if (n == NULL)
	    {
	      if (i == 0)
		clone->nargs += clone->inbranch;
	      continue;
	    }

	  n->simdclone = clone;
	  clone->origin = node;
	  clone->next_clone = NULL;
	  /* Link the new clone into NODE's simd_clones list; prev_clone of
	     the list head points at the last element (circular prev link,
	     linear next link).  */
	  if (node->simd_clones == NULL)
	    {
	      clone->prev_clone = n;
	      node->simd_clones = n;
	    }
	  else
	    {
	      clone->prev_clone = node->simd_clones->simdclone->prev_clone;
	      clone->prev_clone->simdclone->next_clone = n;
	      node->simd_clones->simdclone->prev_clone = n;
	    }
	  symtab->change_decl_assembler_name (n->decl, id);
	  /* And finally adjust the return type, parameters and for
	     definitions also function body.  */
	  if (node->definition)
	    simd_clone_adjust (n);
	  else
	    {
	      TREE_TYPE (n->decl)
		= build_distinct_type_copy (TREE_TYPE (n->decl));
	      targetm.simd_clone.adjust (n);
	      simd_clone_adjust_return_type (n);
	      simd_clone_adjust_argument_types (n);
	    }
	}
    }
  while ((attr = lookup_attribute ("omp declare simd", TREE_CHAIN (attr))));
}

/* Entry point for IPA simd clone creation pass.
   */

static unsigned int
ipa_omp_simd_clone (void)
{
  struct cgraph_node *node;
  /* Visit every function in the callgraph and create its SIMD clones
     where the "omp declare simd" attribute requests them.  */
  FOR_EACH_FUNCTION (node)
    expand_simd_clones (node);
  return 0;
}

namespace {

/* Descriptor for the simple IPA "simdclone" pass.  */
const pass_data pass_data_omp_simd_clone =
{
  SIMPLE_IPA_PASS,		/* type */
  "simdclone",			/* name */
  OPTGROUP_OMP,			/* optinfo_flags */
  TV_NONE,			/* tv_id */
  ( PROP_ssa | PROP_cfg ),	/* properties_required */
  0,				/* properties_provided */
  0,				/* properties_destroyed */
  0,				/* todo_flags_start */
  0,				/* todo_flags_finish */
};

class pass_omp_simd_clone : public simple_ipa_opt_pass
{
public:
  pass_omp_simd_clone(gcc::context *ctxt)
    : simple_ipa_opt_pass(pass_data_omp_simd_clone, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return ipa_omp_simd_clone (); }
};

/* Run only when the target provides the vecsize/simdlen hook, i.e. is
   able to produce simd clones at all.  */
bool
pass_omp_simd_clone::gate (function *)
{
  return targetm.simd_clone.compute_vecsize_and_simdlen != NULL;
}

}  // anon namespace

simple_ipa_opt_pass *
make_pass_omp_simd_clone (gcc::context *ctxt)
{
  return new pass_omp_simd_clone (ctxt);
}
app.c
#include <assert.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <stdint.h> #include <stdbool.h> #include <omp.h> #include <sys/mman.h> #include "../../support/matrix.h" #include "../../support/params.h" #include "../../support/timer.h" #include "../../support/utils.h" int main(int argc, char** argv) { if (mlockall(MCL_CURRENT | MCL_FUTURE)) { perror("mlockall failed:"); return 0; } // Process parameters struct Params p = input_params(argc, argv); // Initialize SpMV data structures PRINT_INFO(p.verbosity >= 1, "Reading matrix %s", p.fileName); struct COOMatrix cooMatrix = readCOOMatrix(p.fileName); PRINT_INFO(p.verbosity >= 1, " %u rows, %u columns, %u nonzeros", cooMatrix.numRows, cooMatrix.numCols, cooMatrix.numNonzeros); struct CSRMatrix csrMatrix = coo2csr(cooMatrix); float* inVector = malloc(csrMatrix.numCols*sizeof(float)); float* outVector = malloc(csrMatrix.numRows*sizeof(float)); float* outVector_hwacha = malloc(csrMatrix.numRows*sizeof(float)); float* outVector_multi = malloc(csrMatrix.numRows*sizeof(float)); initVector(inVector, csrMatrix.numCols); // Calculating result on CPU PRINT_INFO(p.verbosity >= 1, "Calculating result on CPU"); Timer timer; start(&timer, 0, 0); for(uint32_t rowIdx = 0; rowIdx < csrMatrix.numRows; ++rowIdx) { float sum = 0.0f; for(uint32_t i = csrMatrix.rowPtrs[rowIdx]; i < csrMatrix.rowPtrs[rowIdx + 1]; ++i) { uint32_t colIdx = csrMatrix.nonzeros[i].col; float value = csrMatrix.nonzeros[i].value; sum += inVector[colIdx]*value; } outVector[rowIdx] = sum; } stop(&timer, 0); start(&timer, 1, 0); for(uint32_t rowIdx = 0; rowIdx < csrMatrix.numRows; ++rowIdx) { float sum = 0.0f; uint32_t start = csrMatrix.rowPtrs[rowIdx]; uint32_t end = csrMatrix.rowPtrs[rowIdx + 1]; float * result = (float *)malloc(sizeof(float)*(end - start)); for (int i = 0; i < end - start; i++) result[i] = (float) 0.0; int size = vec_vvspmv_asm(end - start, result, inVector, &csrMatrix.nonzeros[start].col, 
&csrMatrix.nonzeros[start].value); for (int i = 0; i < size; i++) sum += result[i]; outVector_hwacha[rowIdx] = sum; free(result); } stop(&timer, 1); start(&timer, 2, 0); omp_set_num_threads(4); #pragma omp parallel for for(uint32_t rowIdx = 0; rowIdx < csrMatrix.numRows; ++rowIdx) { float sum = 0.0f; for(uint32_t i = csrMatrix.rowPtrs[rowIdx]; i < csrMatrix.rowPtrs[rowIdx + 1]; ++i) { uint32_t colIdx = csrMatrix.nonzeros[i].col; float value = csrMatrix.nonzeros[i].value; sum += inVector[colIdx]*value; } outVector_multi[rowIdx] = sum; } stop(&timer, 2); //if(p.verbosity == 0) PRINT("%f", getElapsedTime(timer)*1e3); //PRINT_INFO(p.verbosity >= 1, " Elapsed time: %f ms", getElapsedTime(timer)*1e3); bool hwacha_correct = true; bool multi_correct = true; for (int i = 0; i < csrMatrix.numRows; i++) { if (outVector[i] != outVector_hwacha[i]) { hwacha_correct = false; } if (outVector[i] != outVector_multi[i]) { multi_correct = false; } } printf("******************************\n"); if (hwacha_correct && multi_correct) { printf("Both correct.\n"); } else if (!hwacha_correct) { printf("Hwacha outputs wrong result!\n"); } else if (!multi_correct) { printf("Multi-threading outputs wrong result!\n"); } else { printf("Both wrong!\n"); } printf("******************************\n"); printf("CPU "); print(&timer, 0, 1); printf("\n"); printf("Hwacha "); print(&timer, 1, 1); printf("\n"); printf("4 threads "); print(&timer, 2, 1); printf("\n"); // Deallocate data structures freeCOOMatrix(cooMatrix); freeCSRMatrix(csrMatrix); free(inVector); free(outVector); munlockall(); return 0; }
irbuilder_unroll_full.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs // RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s // expected-no-diagnostics #ifndef HEADER #define HEADER // CHECK-LABEL: define {{.*}}@unroll_full( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[I:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8 // CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4 // CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8 // CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8 // CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8 // CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8 // CHECK-NEXT: store i32 0, i32* %[[I]], align 4 // CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0 // CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4 // CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]]) // CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4 // CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_PREHEADER]]: // CHECK-NEXT: br label %[[OMP_LOOP_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_HEADER]]: // 
CHECK-NEXT: %[[OMP_LOOP_IV:.+]] = phi i32 [ 0, %[[OMP_LOOP_PREHEADER]] ], [ %[[OMP_LOOP_NEXT:.+]], %[[OMP_LOOP_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_LOOP_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_COND]]: // CHECK-NEXT: %[[OMP_LOOP_CMP:.+]] = icmp ult i32 %[[OMP_LOOP_IV]], %[[DOTCOUNT]] // CHECK-NEXT: br i1 %[[OMP_LOOP_CMP]], label %[[OMP_LOOP_BODY:.+]], label %[[OMP_LOOP_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_BODY]]: // CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[OMP_LOOP_IV]], %struct.anon.0* %[[AGG_CAPTURED1]]) // CHECK-NEXT: %[[TMP3:.+]] = load float*, float** %[[B_ADDR]], align 8 // CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP4]] to i64 // CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP3]], i64 %[[IDXPROM]] // CHECK-NEXT: %[[TMP5:.+]] = load float, float* %[[ARRAYIDX]], align 4 // CHECK-NEXT: %[[TMP6:.+]] = load float*, float** %[[C_ADDR]], align 8 // CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP7]] to i64 // CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP6]], i64 %[[IDXPROM2]] // CHECK-NEXT: %[[TMP8:.+]] = load float, float* %[[ARRAYIDX3]], align 4 // CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP5]], %[[TMP8]] // CHECK-NEXT: %[[TMP9:.+]] = load float*, float** %[[D_ADDR]], align 8 // CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM4:.+]] = sext i32 %[[TMP10]] to i64 // CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP9]], i64 %[[IDXPROM4]] // CHECK-NEXT: %[[TMP11:.+]] = load float, float* %[[ARRAYIDX5]], align 4 // CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP11]] // CHECK-NEXT: %[[TMP12:.+]] = load float*, float** %[[A_ADDR]], align 8 // CHECK-NEXT: %[[TMP13:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM7:.+]] = sext i32 %[[TMP13]] to i64 // CHECK-NEXT: 
%[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP12]], i64 %[[IDXPROM7]] // CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4 // CHECK-NEXT: br label %[[OMP_LOOP_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_INC]]: // CHECK-NEXT: %[[OMP_LOOP_NEXT]] = add nuw i32 %[[OMP_LOOP_IV]], 1 // CHECK-NEXT: br label %[[OMP_LOOP_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_EXIT]]: // CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_AFTER]]: // CHECK-NEXT: ret void // CHECK-NEXT: } void unroll_full(float *a, float *b, float *c, float *d) { #pragma omp unroll full for (int i = 0; i < 2; i++) { a[i] = b[i] * c[i] * d[i]; } } #endif // HEADER // CHECK-LABEL: define {{.*}}@__captured_stmt( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8 // CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4 // CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4 // CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4 // CHECK-NEXT: store i32 2, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 %[[TMP4]], %[[TMP5]] // CHECK-NEXT: 
br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_TRUE]]: // CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP6]], %[[TMP7]] // CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP8]], 1 // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]] // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP9]] // CHECK-NEXT: br label %[[COND_END:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_FALSE]]: // CHECK-NEXT: br label %[[COND_END]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_END]]: // CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ] // CHECK-NEXT: %[[TMP10:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP10]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LABEL: define {{.*}}@__captured_stmt.1( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8 // CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]] // CHECK-NEXT: %[[ADD:.+]] = add i32 
%[[TMP2]], %[[MUL]] // CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4} // CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51} // CHECK: ![[META2:[0-9]+]] = // CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]} // CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"} // CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.full"}
broadcast_reduce-inl.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file broadcast_reduce-inl.h
 * \brief CPU-specific Function definition of broadcast and reduce operators
 */
#ifndef MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
#define MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_

#include <mxnet/operator_util.h>
#include <algorithm>
#include <vector>
#include <string>
#include <utility>
#include "../mshadow_op.h"
#include "../mxnet_op.h"
#include "../operator_common.h"

namespace mxnet {
namespace op {
namespace mxnet_op {

/*!
 * \brief Elementwise kernel applying binary op OP to two broadcast inputs.
 *        Each Map() call handles `length` consecutive output elements
 *        starting at flat index `base`; `lstride`/`rstride` are the
 *        broadcast strides of the two inputs over the output shape `oshape`.
 */
template <int ndim, typename OP>
struct binary_broadcast_kernel {
  /*! \brief Map function for binary_broadcast_kernel */
  template <typename IType, typename DType>
  MSHADOW_XINLINE static void Map(index_t base,
                                  index_t length,
                                  OpReqType req,
                                  const Shape<ndim>& lstride,
                                  const Shape<ndim>& rstride,
                                  const Shape<ndim>& oshape,
                                  IType* lhs,
                                  IType* rhs,
                                  DType* out) {
    Shape<ndim> coord = unravel(base, oshape);
    auto lidx = static_cast<index_t>(dot(coord, lstride));
    auto ridx = static_cast<index_t>(dot(coord, rstride));
    KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx]));
    // starts from 1 to avoid extra inc at end of loop
    for (index_t i = 1; i < length; ++i) {
      inc(&coord, oshape, &lidx, lstride, &ridx, rstride);
      // When tuning, don't actually run the op, since it's not going to be tuned against
      // the actual op we'll eventually be using
      KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx]));
    }
  }

  /*! \brief Map function for binary_broadcast_kernel (independent lhs/rhs/out types) */
  template <typename LType, typename RType, typename OType>
  MSHADOW_XINLINE static void Map(index_t base,
                                  index_t length,
                                  OpReqType req,
                                  const Shape<ndim>& lstride,
                                  const Shape<ndim>& rstride,
                                  const Shape<ndim>& oshape,
                                  LType* lhs,
                                  RType* rhs,
                                  OType* out) {
    Shape<ndim> coord = unravel(base, oshape);
    auto lidx = static_cast<index_t>(dot(coord, lstride));
    auto ridx = static_cast<index_t>(dot(coord, rstride));
    KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx]));
    // starts from 1 to avoid extra inc at end of loop
    for (index_t i = 1; i < length; ++i) {
      inc(&coord, oshape, &lidx, lstride, &ridx, rstride);
      // When tuning, don't actually run the op, since it's not going to be tuned against
      // the actual op we'll eventually be using
      KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx]));
    }
  }

  /*! \brief Map function for binary_broadcast_kernel (scalar lhs operand) */
  template <typename IType, typename DType>
  MSHADOW_XINLINE static void Map(index_t base,
                                  index_t length,
                                  OpReqType req,
                                  const Shape<ndim>& lstride,
                                  const Shape<ndim>& rstride,
                                  const Shape<ndim>& oshape,
                                  IType lhs,
                                  IType* rhs,
                                  DType* out) {
    Shape<ndim> coord = unravel(base, oshape);
    auto lidx = static_cast<index_t>(dot(coord, lstride));
    auto ridx = static_cast<index_t>(dot(coord, rstride));
    KERNEL_ASSIGN(out[base], req, OP::Map(lhs, rhs[ridx]));
    // starts from 1 to avoid extra inc at end of loop
    for (index_t i = 1; i < length; ++i) {
      inc(&coord, oshape, &lidx, lstride, &ridx, rstride);
      // When tuning, don't actually run the op, since it's not going to be tuned against
      // the actual op we'll eventually be using
      KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs, rhs[ridx]));
    }
  }

  /*! \brief Map function for binary_broadcast_kernel */
  /* used for mixed type binary ops */
  template <typename IType,
            typename DType,
            typename std::enable_if<!std::is_same<IType, DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t base,
                                  index_t length,
                                  OpReqType req,
                                  const Shape<ndim>& lstride,
                                  const Shape<ndim>& rstride,
                                  const Shape<ndim>& oshape,
                                  IType* lhs,
                                  DType* rhs,
                                  DType* out) {
    Shape<ndim> coord = unravel(base, oshape);
    auto lidx = static_cast<index_t>(dot(coord, lstride));
    auto ridx = static_cast<index_t>(dot(coord, rstride));
    KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx]));
    // starts from 1 to avoid extra inc at end of loop
    for (index_t i = 1; i < length; ++i) {
      inc(&coord, oshape, &lidx, lstride, &ridx, rstride);
      // When tuning, don't actually run the op, since it's not going to be tuned against
      // the actual op we'll eventually be using
      KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx]));
    }
  }

  /*! \brief Map function for binary_broadcast_kernel */
  /* used for mixed type binary ops */
  template <
      typename IType,
      typename DType,
      typename std::enable_if<!std::is_same<IType, DType>::value && !std::is_pointer<IType>::value,
                              int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t base,
                                  index_t length,
                                  OpReqType req,
                                  const Shape<ndim>& lstride,
                                  const Shape<ndim>& rstride,
                                  const Shape<ndim>& oshape,
                                  IType lhs,
                                  DType* rhs,
                                  DType* out) {
    Shape<ndim> coord = unravel(base, oshape);
    auto lidx = static_cast<index_t>(dot(coord, lstride));
    auto ridx = static_cast<index_t>(dot(coord, rstride));
    KERNEL_ASSIGN(out[base], req, OP::Map(lhs, rhs[ridx]));
    // starts from 1 to avoid extra inc at end of loop
    for (index_t i = 1; i < length; ++i) {
      inc(&coord, oshape, &lidx, lstride, &ridx, rstride);
      // When tuning, don't actually run the op, since it's not going to be tuned against
      // the actual op we'll eventually be using
      KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs, rhs[ridx]));
    }
  }
};

/*! \brief Broadcast a dense vector (row or column, chosen by col_vec) over a CSR matrix. */
template <int req, typename OP, bool col_vec>
struct csr_dns_csr_broadcast_kernel {
  /*!
   * \brief Map function for broadcast between csr and 1D vector
   * \param row global thread id/assigned row id
   * \param csr_data ptr to data buffer of csr matrix
   * \param csr_indices ptr to indices buffer of csr matrix
   * \param csr_indptr ptr to indptr buffer of csr matrix
   * \param dns ptr to data buffer of the dense vector
   * \param out ptr to the data buffer of the result csr matrix
   */
  template <typename DType, typename CType, typename RType>
  MSHADOW_XINLINE static void Map(index_t row,
                                  const DType* csr_data,
                                  const CType* csr_indices,
                                  const RType* csr_indptr,
                                  const DType* dns,
                                  DType* out) {
    const nnvm::dim_t curr_row_i = csr_indptr[row];
    const nnvm::dim_t next_row_i = csr_indptr[row + 1];
    for (nnvm::dim_t iter = curr_row_i; iter < next_row_i; iter++) {
      KERNEL_ASSIGN(
          out[iter], req, OP::Map(csr_data[iter], (col_vec) ? dns[row] : dns[csr_indices[iter]]));
    }
  }

  /*!
   * \brief Map function for broadcast between csr and a scalar
   * \param i global thread id
   * \param csr_data ptr to data buffer of csr matrix
   * \param scalar_ptr ptr to data buffer of the scalar tensor, only the 0-th element is used
   * \param out ptr to the data buffer of output csr matrix
   * \param nnz number of non-zero elements in input csr matrix
   */
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i,
                                  const DType* csr_data,
                                  const DType* scalar_ptr,
                                  DType* out,
                                  const nnvm::dim_t nnz) {
    const DType scale = scalar_ptr[0];
    if (i < nnz) {
      KERNEL_ASSIGN(out[i], req, OP::Map(csr_data[i], scale));
    }
  }
};

/*!
 * \brief Apply binary op OP between the nonzeros of a CSR matrix and the
 *        corresponding elements of a row-major dense buffer `out`, writing
 *        the result back into `out`.  `reverse` swaps the operand order
 *        passed to OP::Map.
 */
template <int req, typename OP, bool reverse = false>
struct csr_dns_map_kernel {
  template <typename DType, typename CType, typename RType>
  MSHADOW_XINLINE static void Map(index_t row,
                                  const DType* csr_data,
                                  const CType* csr_indices,
                                  const RType* csr_indptr,
                                  DType* out,
                                  const nnvm::dim_t num_rows,
                                  const nnvm::dim_t num_cols) {
    if (row < num_rows) {
      const nnvm::dim_t curr_row_i = csr_indptr[row];
      const nnvm::dim_t next_row_i = csr_indptr[row + 1];
      for (nnvm::dim_t iter = curr_row_i; iter < next_row_i; iter++) {
        // flat index of this nonzero in the dense row-major output
        const nnvm::dim_t target = row * num_cols + csr_indices[iter];
        KERNEL_ASSIGN(
            out[target],
            req,
            reverse ?
                OP::Map(out[target], csr_data[iter]) :
                OP::Map(csr_data[iter], out[target]));
      }
    }
  }
};

}  // namespace mxnet_op

namespace broadcast {
using namespace mshadow;

/* Maximum tensor rank supported by the broadcast/reduce kernels below. */
const int MAX_DIM = 5;

/*!
 * \brief Unravel flat index `idx` over `shape` and simultaneously compute
 *        the two strided offsets *j (via stridej) and *k (via stridek).
 */
template <int ndim>
MSHADOW_XINLINE void unravel_dot(const index_t idx,
                                 const Shape<ndim>& shape,
                                 const Shape<ndim>& stridej,
                                 const Shape<ndim>& stridek,
                                 index_t* j,
                                 index_t* k) {
  *j = 0;
  *k = 0;
#pragma unroll
  for (index_t i = ndim - 1, idx_t = idx; i >= 0; --i) {
    const auto tmp = idx_t / shape[i];
    const auto coord = idx_t - tmp * shape[i];
    *j += coord * stridej[i];
    *k += coord * stridek[i];
    idx_t = tmp;
  }
}

/*!
 * \brief Collect the shape (`dims`) and strides (`stride`) of the axes on
 *        which `small` and `big` differ (the reduction axes).
 * \return the number of differing axes.
 */
template <int ndim>
MSHADOW_XINLINE int diff(const Shape<ndim>& small,
                         const Shape<ndim>& big,
                         Shape<ndim>* dims,
                         Shape<ndim>* stride) {
  int mdim = 0;
#pragma unroll
  for (int i = 0; i < ndim; ++i) {
    mdim += small[i] != big[i];
    (*dims)[i] = (*stride)[i] = 1;
  }
  index_t s = 1;
#pragma unroll
  for (int i = ndim - 1, j = mdim; i >= 0; --i) {
    if (small[i] != big[i]) {
      --j;
      (*stride)[j] = s;
      (*dims)[j] = big[i];
    }
    s *= big[i];
  }
  return mdim;
}

/*! \brief Store or accumulate `src` into *dst depending on `addto`. */
template <typename DType>
MSHADOW_XINLINE void assign(DType* dst, const bool addto, const DType src) {
  if (addto) {
    *dst += src;
  } else {
    *dst = src;
  }
}

/*! \brief Compute a single broadcast output element at flat index `idx`. */
template <int ndim, typename DType, typename OP>
MSHADOW_XINLINE void binary_broadcast_assign(const index_t idx,
                                             const bool addto,
                                             const DType* __restrict lhs,
                                             const DType* __restrict rhs,
                                             DType* out,
                                             const Shape<ndim>& lshape,
                                             const Shape<ndim>& rshape,
                                             const Shape<ndim>& oshape) {
  const Shape<ndim> coord = mxnet_op::unravel(idx, oshape);
  const index_t j = mxnet_op::ravel(coord, lshape);
  const index_t k = mxnet_op::ravel(coord, rshape);
  assign(&out[idx], addto, OP::Map(lhs[j], rhs[k]));
}

/*!
 * \brief Reduce the sub-range [start, start + len) of the reduction space for
 *        the output element at base offset `j`; returns the partial
 *        (value, residual) pair so callers can merge partials across threads.
 */
template <typename Reducer,
          int ndim,
          typename AType,
          typename DType,
          typename OType,
          typename OP,
          typename IndexOP = mxnet::op::mshadow_op::set_index_no_op<AType, index_t>>
MSHADOW_XINLINE std::pair<AType, AType> seq_reduce_assign_block(size_t start,
                                                                size_t len,
                                                                size_t j,
                                                                const DType* __restrict big,
                                                                const
                                                                Shape<ndim>& rshape,
                                                                const Shape<ndim>& rstride) {
  Shape<ndim> coord;
  AType val, residual;
  Reducer::SetInitValue(val, residual);
  for (size_t k = start; k < start + len; ++k) {
    coord = mxnet_op::unravel(k, rshape);
    AType temp = OP::Map(big[j + mxnet_op::dot(coord, rstride)]);
    // argmin/max, set IndexedNum.idx
    if (IndexOP::do_op)
      IndexOP::Op(&temp, k);
    Reducer::Reduce(val, temp, residual);
  }
  return std::make_pair(val, residual);
}

/*!
 * \brief Reduce all M elements of the reduction space into small[idx].
 *        When use_omp is true, the reduction space is partitioned across
 *        OpenMP threads via seq_reduce_assign_block and the per-thread
 *        partials are merged afterwards.
 */
template <typename Reducer,
          int ndim,
          typename AType,
          typename DType,
          typename OType,
          typename OP,
          typename IndexOP = mxnet::op::mshadow_op::set_index_no_op<AType, index_t>>
MSHADOW_XINLINE void seq_reduce_assign(const index_t idx,
                                       const size_t M,
                                       const bool addto,
                                       const DType* __restrict big,
                                       OType* small,
                                       const Shape<ndim>& bshape,
                                       const Shape<ndim>& sshape,
                                       const Shape<ndim>& rshape,
                                       const Shape<ndim>& rstride,
                                       const bool use_omp = false) {
  Shape<ndim> coord = mxnet_op::unravel(idx, sshape);
  index_t j = mxnet_op::ravel(coord, bshape);
  AType val, residual;
  Reducer::SetInitValue(val, residual);
  if (!use_omp) {
    for (size_t k = 0; k < M; ++k) {
      coord = mxnet_op::unravel(k, rshape);
      AType temp = OP::Map(big[j + mxnet_op::dot(coord, rstride)]);
      // argmin/max, set IndexedNum.idx
      if (IndexOP::do_op)
        IndexOP::Op(&temp, k);
      Reducer::Reduce(val, temp, residual);
    }
  } else {
    const int thread_count = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    auto pairs = std::make_unique<std::pair<AType, AType>[]>(thread_count);
#pragma omp parallel for num_threads(thread_count)
    for (int i = 0; i < thread_count; ++i) {
      // last thread also takes the M % thread_count remainder
      pairs[i] = seq_reduce_assign_block<Reducer, ndim, AType, DType, OType, OP, IndexOP>(
          i * (M / thread_count),
          i < (thread_count - 1) ?
              (M / thread_count) :
              (M / thread_count) + M % thread_count,
          j,
          big,
          rshape,
          rstride);
    }
    // Merge the per-thread partial (value, residual) pairs sequentially.
    for (int i = 0; i < thread_count; ++i) {
      Reducer::Merge(val, residual, pairs[i].first, pairs[i].second);
    }
  }
  Reducer::Finalize(val, residual);
  assign(&small[idx], addto, OType(val));
}

namespace {

// Returns the stride with which the fastest dimension is moving.
// Used to detect memory access scatter.
inline int fastest_stride(const TShape& small, const TShape& big, const TShape& big_stride) {
  const int ndim = small.ndim();
  for (int i = ndim - 1; i >= 0; --i) {
    if (big[i] != 1) {
      return (small[i] == big[i]) ? 1 : big_stride[i];
    }
  }
  return 1;
}

}  // namespace

/*! \brief Launch the CPU binary broadcast kernel over the whole output blob. */
template <int ndim, typename DType, typename OP>
void BinaryBroadcastComputeImpl(Stream<cpu>* s,
                                const OpReqType req,
                                const TBlob& lhs,
                                const TBlob& rhs,
                                const TBlob& out) {
  mshadow::Shape<ndim> oshape = out.shape_.get<ndim>();
  mshadow::Shape<ndim> lstride = mxnet_op::calc_stride(lhs.shape_.get<ndim>());
  mshadow::Shape<ndim> rstride = mxnet_op::calc_stride(rhs.shape_.get<ndim>());
  mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<ndim, OP>, cpu>::template LaunchEx(
      s,
      out.shape_.Size(),
      req,
      lstride,
      rstride,
      oshape,
      lhs.dptr<DType>(),
      rhs.dptr<DType>(),
      out.dptr<DType>());
}

/*!
 * \brief Reduce `big` into the N elements of `small` on CPU.  Parallelizes
 *        over output elements when N >= thread_count; otherwise each output
 *        parallelizes over its reduction space inside seq_reduce_assign.
 */
template <typename Reducer,
          int ndim,
          typename AType,
          typename DType,
          typename OType,
          typename OP,
          typename IndexOP = mxnet::op::mshadow_op::set_index_no_op<AType, index_t>>
void seq_reduce_compute(const size_t N,
                        const size_t M,
                        const bool addto,
                        const DType* big,
                        OType* small,
                        const Shape<ndim> bshape,
                        const Shape<ndim> sshape,
                        const Shape<ndim> rshape,
                        const Shape<ndim> rstride) {
  const int thread_count = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  if (N >= thread_count) {
#pragma omp parallel for num_threads(thread_count)
    for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
      seq_reduce_assign<Reducer, ndim, AType, DType, OType, OP, IndexOP>(
          idx, M, addto, big, small, bshape, sshape, rshape, rstride, false);
    }
  } else {
    for
        (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
      seq_reduce_assign<Reducer, ndim, AType, DType, OType, OP, IndexOP>(
          idx, M, addto, big, small, bshape, sshape, rshape, rstride, true);
    }
  }
}

/*!
 * \brief Reduction variant that reads precomputed reduction-space offsets
 *        from `ws_dptr` (filled by ReduceWithExtraMem) instead of
 *        unravelling indices in the inner loop.
 */
template <typename Reducer, int ndim, typename DType, typename OP>
void seq_reduce_compute_extra_mem(const size_t N,
                                  const size_t M,
                                  const bool addto,
                                  const DType* big,
                                  DType* small,
                                  const Shape<ndim> bshape,
                                  const Shape<ndim> sshape,
                                  const Shape<ndim> rshape,
                                  const Shape<ndim> rstride,
                                  const index_t* ws_dptr) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
    Shape<ndim> coord = mxnet_op::unravel(idx, sshape);
    index_t j = mxnet_op::ravel(coord, bshape);
    DType val, residual;
    Reducer::SetInitValue(val, residual);
    for (size_t k = 0; k < M; ++k) {
      Reducer::Reduce(val, OP::Map(big[j + ws_dptr[k]]), residual);
    }
    assign(&small[idx], addto, val);
  }
}

/*!
 * \brief Reduce `big` into `small` on CPU.  With safe_acc, accumulation runs
 *        in the wider type chosen by MXNET_ACC_TYPE_SWITCH.
 */
template <typename Reducer, int ndim, typename DType, typename OP, bool safe_acc = false>
void Reduce(Stream<cpu>* s,
            const TBlob& small,
            const OpReqType req,
            const Tensor<cpu, 1, char>& workspace,
            const TBlob& big) {
  if (req == kNullOp)
    return;
  Shape<ndim> rshape, rstride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  size_t N = small.shape_.Size(), M = rshape.Size();
  if (!safe_acc) {
    seq_reduce_compute<Reducer, ndim, DType, DType, DType, OP>(N,
                                                               M,
                                                               req == kAddTo,
                                                               big.dptr<DType>(),
                                                               small.dptr<DType>(),
                                                               big.shape_.get<ndim>(),
                                                               small.shape_.get<ndim>(),
                                                               rshape,
                                                               rstride);
  } else {
    MXNET_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, {
      typedef typename std::conditional<safe_acc, AType, DataType>::type AccType;
      MSHADOW_TYPE_SWITCH_WITH_BOOL(small.type_flag_, OType, {
        typedef typename std::conditional<safe_acc, OType, DataType>::type OutType;
        seq_reduce_compute<Reducer, ndim, AccType, DataType, OutType, OP>(N,
                                                                          M,
                                                                          req == kAddTo,
                                                                          big.dptr<DataType>(),
                                                                          small.dptr<OutType>(),
big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride); }); }); } } template <typename Reducer, int ndim, typename DType, typename OP> void ReduceBool(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big) { if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); size_t N = small.shape_.Size(), M = rshape.Size(); seq_reduce_compute<Reducer, ndim, bool, DType, bool, OP>(N, M, req == kAddTo, big.dptr<DType>(), small.dptr<bool>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride); } template <typename Reducer, int ndim, typename DType, typename OP> void ReduceWithExtraMem(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big) { using namespace mxnet_op; if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); index_t* ws_dptr = reinterpret_cast<index_t*>(workspace.dptr_); size_t N = small.shape_.Size(), M = rshape.Size(); #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t k = 0; k < static_cast<index_t>(M); k++) { Shape<ndim> coord = mxnet_op::unravel(k, rshape); ws_dptr[k] = mxnet_op::dot(coord, rstride); } seq_reduce_compute_extra_mem<Reducer, ndim, DType, OP>(N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride, ws_dptr); } inline size_t ReduceWorkspaceSize(Stream<cpu>* s, const mxnet::TShape& small, const OpReqType req, const mxnet::TShape& big) { return 0; } inline size_t ReduceWorkspaceSize(Stream<cpu>* s, const mxnet::TShape& small, const OpReqType req, const mxnet::TShape& big, const mxnet::TShape& lhs, const mxnet::TShape& rhs) { return 0; } #if MXNET_USE_CUDA namespace { constexpr int warpSize = 32; constexpr int unroll_reduce = 2; // 
Returns a/b integer division rounded up template <typename Type> Type ceil_idiv(const Type a, const Type b) { return (a + b - 1) / b; } uint64_t calc_num_load(const int X, const int Y, const int* strides) { // Number of full warps uint64_t num_full_warp = X / warpSize; // Length of the partial warp i.e. number of threads that are performing loads uint64_t len_part_warp = X % warpSize; uint64_t num_load_full = (std::min(warpSize, strides[0]) + std::min(warpSize, strides[1]) + std::min(warpSize, strides[2])) * num_full_warp; uint64_t num_load_part = (std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp * strides[0], warpSize)) + std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp * strides[1], warpSize)) + std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp * strides[2], warpSize))) * (len_part_warp != 0); uint64_t num_load = (num_load_full + num_load_part) * (uint64_t)Y; return num_load; } inline int diff(const TShape& small, const TShape& big, TShape* dims, TShape* stride) { int ndim = small.ndim(); int mdim = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { mdim += small[i] != big[i]; (*dims)[i] = (*stride)[i] = 1; } index_t s = 1; #pragma unroll for (int i = ndim - 1, j = mdim; i >= 0; --i) { if (small[i] != big[i]) { --j; (*stride)[j] = s; (*dims)[j] = big[i]; } s *= big[i]; } return mdim; } constexpr int nthread_reduce = 512; constexpr index_t kBaseGridNum = 1024; } // namespace // Configuration for ReduceImpl() struct ReduceImplConfig { index_t N; index_t M; index_t Mnext; struct { dim3 blockDim; dim3 gridDim; int shMemSize; bool do_transpose; } kernel_1; struct { int blockSize; int gridSize; } kernel_2; size_t workspace_size; TShape rshape, rstride; TShape lhs_shape, lhs_stride; TShape rhs_shape, rhs_stride; inline ReduceImplConfig(const ::mxnet::TShape& small, const ::mxnet::TShape& big, const ::mxnet::TShape* lhs, const ::mxnet::TShape* rhs) : rshape(small.ndim(), 1), rstride(small.ndim(), 1), lhs_shape(small.ndim(), 1), 
        lhs_stride(small.ndim(), 1),
        rhs_shape(small.ndim(), 1),
        rhs_stride(small.ndim(), 1) {
    // The largest reduction type currently is (index_t, double) struct
    // aligned to 16B
    constexpr size_t max_type_size = 2 * sizeof(double);
    // Maximum number of times we let one thread block loop over M.
    constexpr int maxLoopPerTB = 64;
    int ndim = small.ndim();
    diff(small, big, &rshape, &rstride);
    N = small.Size();
    M = rshape[0];
    for (int i = 1; i < ndim; ++i) {
      M *= rshape[i];
    }
    // multiOp selects the fused three-input (big, lhs, rhs) reduction form.
    bool multiOp = false;
    if (lhs != nullptr) {
      CHECK_NOTNULL(rhs);
      diff(small, *lhs, &lhs_shape, &lhs_stride);
      diff(small, *rhs, &rhs_shape, &rhs_stride);
      multiOp = true;
    }
    workspace_size = 0;
    kernel_1.shMemSize = 0;
    kernel_1.do_transpose = false;
    if (M == 1) {
      // Nothing to reduce: a plain element-wise pass over N outputs.
      kernel_1.blockDim.x = nthread_reduce;
      kernel_1.gridDim.x = std::min(
          kBaseGridNum, static_cast<index_t>((N + kernel_1.blockDim.x - 1) / kernel_1.blockDim.x));
    } else {
      // Estimate load traffic for both thread layouts and pick the cheaper.
      int reduce_strides[3];
      reduce_strides[0] = fastest_stride(small, big, big);
      reduce_strides[1] = (multiOp) ? fastest_stride(small, *lhs, *lhs) : 1;
      reduce_strides[2] = (multiOp) ? fastest_stride(small, *rhs, *rhs) : 1;
      int reduce_strides_transp[3];
      reduce_strides_transp[0] = fastest_stride(small, rshape, rstride);
      reduce_strides_transp[1] = (multiOp) ? fastest_stride(small, lhs_shape, lhs_stride) : 1;
      reduce_strides_transp[2] = (multiOp) ? fastest_stride(small, rhs_shape, rhs_stride) : 1;
      uint64_t num_load = calc_num_load(N, M, reduce_strides);
      uint64_t num_load_transp = calc_num_load(M, N, reduce_strides_transp);
      Mnext = 1;
      kernel_1.do_transpose = (num_load > num_load_transp);
      kernel_1.blockDim.x = 0;
      kernel_1.blockDim.y = 0;
      if (kernel_1.do_transpose) {
        // Fastest thread ID goes through M
        // Loop over N has step size kernel_1.blockDim.y
        if (N < 8) {
          kernel_1.blockDim.y = 1;
        } else if (N < 256) {
          kernel_1.blockDim.y = 4;
        } else {
          if (M < 8) {
            kernel_1.blockDim.x = 1;
          } else if (M < 256) {
            kernel_1.blockDim.x = 4;
          } else {
            kernel_1.blockDim.x = warpSize;
          }
        }
      } else {
        // Fastest thread ID goes through N
        // Loop over M has step size kernel_1.blockDim.y
        if (M < 8) {
          kernel_1.blockDim.y = 1;
        } else if (M < 256) {
          kernel_1.blockDim.y = 4;
        } else {
          if (N < 8) {
            kernel_1.blockDim.x = 1;
          } else if (N < 256) {
            kernel_1.blockDim.x = 4;
          } else {
            kernel_1.blockDim.x = warpSize;
          }
        }
      }
      // Fill whichever block dimension was left unset so that the block
      // totals nthread_reduce threads.
      if (kernel_1.blockDim.x == 0 && kernel_1.blockDim.y == 0) {
        LOG(FATAL) << "Unable to set blockDim";
      } else if (kernel_1.blockDim.x == 0) {
        kernel_1.blockDim.x = nthread_reduce / kernel_1.blockDim.y;
      } else if (kernel_1.blockDim.y == 0) {
        kernel_1.blockDim.y = nthread_reduce / kernel_1.blockDim.x;
      }
      if (kernel_1.do_transpose) {
        // Fastest thread ID goes through M
        kernel_1.gridDim.x = std::min((unsigned int)kBaseGridNum,
                                      ceil_idiv<unsigned int>(N, kernel_1.blockDim.y));
        kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext);
        int by = kernel_1.blockDim.y;
        if (kernel_1.blockDim.y % warpSize == 0) {
          // Fix shared memory bank conflict
          by++;
        }
        kernel_1.shMemSize =
            (kernel_1.blockDim.x > 1) ? kernel_1.blockDim.x * by * max_type_size * 2 : 0;
        // Maximum number of times we want TB to loop in M
        // Max size of M-block each TB can handle
        int maxMblock = kernel_1.blockDim.x * maxLoopPerTB;
        Mnext = (M + maxMblock - 1) / maxMblock;
      } else {
        // Fastest thread ID goes through N
        kernel_1.gridDim.x = std::min((unsigned int)kBaseGridNum,
                                      ceil_idiv<unsigned int>(N, kernel_1.blockDim.x));
        kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext);
        kernel_1.shMemSize =
            (kernel_1.blockDim.y > 1)
                ? kernel_1.blockDim.x * kernel_1.blockDim.y * max_type_size * 2 : 0;
        // Maximum number of times we want TB to loop in M
        // Max size of M-block each TB can handle
        int maxMblock = kernel_1.blockDim.y * maxLoopPerTB;
        Mnext = (M + maxMblock - 1) / maxMblock;
      }
      if (Mnext > 1) {
        // small_dptr[] is N*Mnext*type_size bytes
        workspace_size += N * Mnext * max_type_size;
        // Set gridDim.y to Mnext
        kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext);
      }
      if (Mnext > 1) {
        // A second kernel merges the Mnext partial results per output.
        kernel_2.blockSize = nthread_reduce;
        kernel_2.gridSize = std::min(
            kBaseGridNum, static_cast<index_t>((N + kernel_2.blockSize - 1) / kernel_2.blockSize));
      }
    }
  }
};

// GPU workspace queries: derive the scratch size from the launch config.
inline size_t ReduceWorkspaceSize(Stream<gpu>* s,
                                  const ::mxnet::TShape& small,
                                  const OpReqType req,
                                  const ::mxnet::TShape& big) {
  if (req == kNullOp)
    return 0;
  ReduceImplConfig config(small, big, nullptr, nullptr);
  return config.workspace_size;
}

inline size_t ReduceWorkspaceSize(Stream<gpu>* s,
                                  const ::mxnet::TShape& small,
                                  const OpReqType req,
                                  const ::mxnet::TShape& big,
                                  const ::mxnet::TShape& lhs,
                                  const ::mxnet::TShape& rhs) {
  if (req == kNullOp)
    return 0;
  ReduceImplConfig config(small, big, &lhs, &rhs);
  return config.workspace_size;
}

#endif  // MXNET_USE_CUDA

// Compute one output element of the fused reduction
// small[idx] = reduce_k OP1(big[k], OP2(lhs[k], rhs[k])), where lhs/rhs are
// broadcast against big over the reduced axes.
template <typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
MSHADOW_XINLINE void seq_reduce_assign(const index_t idx,
                                       const size_t M,
                                       const bool addto,
                                       const DType* __restrict big,
                                       const DType* __restrict lhs,
                                       const DType* __restrict rhs,
                                       DType* small,
                                       const Shape<ndim>& big_shape,
                                       const
                                           Shape<ndim>& lhs_shape0,
                                       const Shape<ndim>& rhs_shape0,
                                       const Shape<ndim>& small_shape,
                                       const Shape<ndim>& rshape,
                                       const Shape<ndim>& lhs_shape,
                                       const Shape<ndim>& rhs_shape,
                                       const Shape<ndim>& rstride,
                                       const Shape<ndim>& lhs_stride,
                                       const Shape<ndim>& rhs_stride) {
  // Base offsets of this output element inside each of the three inputs.
  Shape<ndim> coord = mxnet_op::unravel(idx, small_shape);
  const index_t idx_big0 = mxnet_op::ravel(coord, big_shape);
  const index_t idx_lhs0 = mxnet_op::ravel(coord, lhs_shape0);
  const index_t idx_rhs0 = mxnet_op::ravel(coord, rhs_shape0);
  DType val, residual;
  Reducer::SetInitValue(val, residual);
  for (size_t k = 0; k < M; ++k) {
    // Per-input offsets of the k-th reduced element.
    Shape<ndim> coord_big = mxnet_op::unravel(k, rshape);
    index_t idx_big = idx_big0 + mxnet_op::dot(coord_big, rstride);
    Shape<ndim> coord_lhs = mxnet_op::unravel(k, lhs_shape);
    index_t idx_lhs = idx_lhs0 + mxnet_op::dot(coord_lhs, lhs_stride);
    Shape<ndim> coord_rhs = mxnet_op::unravel(k, rhs_shape);
    index_t idx_rhs = idx_rhs0 + mxnet_op::dot(coord_rhs, rhs_stride);
    Reducer::Reduce(val, OP1::Map(big[idx_big], OP2::Map(lhs[idx_lhs], rhs[idx_rhs])), residual);
  }
  Reducer::Finalize(val, residual);
  assign(&small[idx], addto, val);
}

// Drive the fused three-input reduction over all N output elements,
// parallelized over outputs with OpenMP.
template <typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void seq_reduce_compute(const size_t N,
                        const size_t M,
                        const bool addto,
                        const DType* big,
                        const DType* lhs,
                        const DType* rhs,
                        DType* small,
                        const Shape<ndim> big_shape,
                        const Shape<ndim> small_shape,
                        const Shape<ndim> rshape,
                        const Shape<ndim> rstride,
                        const Shape<ndim> lhs_shape,
                        const Shape<ndim> lhs_stride,
                        const Shape<ndim> rhs_shape,
                        const Shape<ndim> rhs_stride,
                        const Shape<ndim>& lhs_shape0,
                        const Shape<ndim>& rhs_shape0) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
    seq_reduce_assign<Reducer, ndim, DType, OP1, OP2>(idx, M, addto, big, lhs, rhs, small,
                                                      big_shape, lhs_shape0, rhs_shape0,
                                                      small_shape, rshape, lhs_shape, rhs_shape,
                                                      rstride, lhs_stride, rhs_stride);
  }
}

// CPU entry point for the fused reduction
// small = reduce(OP1(big, OP2(lhs, rhs))).
template <typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void Reduce(Stream<cpu>* s,
            const TBlob& small,
            const OpReqType req,
            const Tensor<cpu, 1, char>& workspace,
            const TBlob& big,
            const TBlob& lhs,
            const TBlob& rhs) {
  if (req == kNullOp)
    return;
  // Reduced-axis descriptors for each of the three inputs.
  Shape<ndim> rshape, rstride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  size_t N = small.shape_.Size();
  size_t M = rshape.Size();
  Shape<ndim> lhs_shape, lhs_stride;
  diff(small.shape_.get<ndim>(), lhs.shape_.get<ndim>(), &lhs_shape, &lhs_stride);
  Shape<ndim> rhs_shape, rhs_stride;
  diff(small.shape_.get<ndim>(), rhs.shape_.get<ndim>(), &rhs_shape, &rhs_stride);
  seq_reduce_compute<Reducer, ndim, DType, OP1, OP2>(
      N, M, req == kAddTo, big.dptr<DType>(), lhs.dptr<DType>(), rhs.dptr<DType>(),
      small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride,
      lhs_shape, lhs_stride, rhs_shape, rhs_stride,
      lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>());
}

#if MXNET_USE_CUDA
// Runtime-compiled (RTC) GPU reductions; implemented elsewhere.
void RTCReduce(const OpContext& ctx,
               const TBlob& small,
               const OpReqType req,
               const Tensor<gpu, 1, char>& workspace,
               const TBlob& big,
               const std::string& reducer,
               int ndim,
               const std::string& OP,
               const bool use_index = false);

void RTCReduce(const OpContext& ctx,
               const TBlob& small,
               const OpReqType req,
               const Tensor<gpu, 1, char>& workspace,
               const TBlob& big,
               const TBlob& lhs,
               const TBlob& rhs,
               const std::string& reducer,
               int ndim,
               const std::string& OP1,
               const std::string& OP2);
#endif

}  // namespace broadcast
}  // namespace op
}  // namespace mxnet

#endif  // MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
ipv4_crypt.c
// Format-preserving IPv4 encryption
// Maps each IPv4 address onto a different IPv4 address via a 64-bit key.
//
//   int ipv4_encrypt(char *ip, const void *key);
//   int ipv4_decrypt(char *ip, const void *key);
//
// This is free and unencumbered software released into the public domain.

// Parse a quad-dotted IPv4 address string into its 32-bit numerical value.
// Each octet may be any run of digits whose value stays <= 255 (so leading
// zeros are tolerated). Returns the address on success, -1 on invalid input.
static long long ipv4_decode(const char *s)
{
    unsigned long addr = 0;
    int noctets = 0;

    while (noctets < 4) {
        int value = 0;
        int ndigits = 0;

        // Accumulate one octet worth of digits.
        while (*s >= '0' && *s <= '9') {
            value = value*10 + (*s++ - '0');
            if (value > 255) {
                return -1;  // octet out of range
            }
            ndigits++;
        }
        if (ndigits == 0) {
            return -1;  // empty octet: "", "1..2", ".1", trailing dot
        }

        addr = addr<<8 | (unsigned long)value;
        noctets++;

        if (*s == '\0') {
            // Valid only when exactly four octets were consumed.
            return noctets == 4 ? (long long)addr : -1LL;
        }
        if (*s++ != '.') {
            return -1;  // illegal character
        }
    }
    return -1;  // more than four octets, e.g. "1.2.3.4.5"
}

// Write the numerical address ip as a quad-dotted string into s. The
// destination must have room for at least 16 bytes ("255.255.255.255"
// plus the terminating NUL).
static void ipv4_encode(char *s, unsigned long ip)
{
    for (int shift = 24; shift >= 0; shift -= 8) {
        int octet = (int)(ip>>shift) & 0xff;

        // Emit only the significant decimal digits (no leading zeros).
        if (octet >= 100) {
            *s++ = (char)('0' + octet/100);
        }
        if (octet >= 10) {
            *s++ = (char)('0' + octet/10%10);
        }
        *s++ = (char)('0' + octet%10);

        // Dot between octets, NUL after the last one.
        *s++ = shift ? '.' : '\0';
    }
}

// Assemble a 32-bit word from 4 key bytes, little-endian.
static unsigned long load32le(const unsigned char *p)
{
    return (unsigned long)p[0] <<  0 | (unsigned long)p[1] <<  8 |
           (unsigned long)p[2] << 16 | (unsigned long)p[3] << 24;
}

// Encrypt a quad-dotted IPv4 address in place using format-preserving
// encryption. The key size is 8 bytes, and the buffer must have room for
// at least 16 bytes. Returns 1 on success or 0 if the input was invalid.
// The xorshift-multiply rounds below form a bijection on 32-bit values;
// ipv4_decrypt applies the inverse steps in reverse order.
int ipv4_encrypt(char *s, const void *key)
{
    long long decoded = ipv4_decode(s);
    if (decoded < 0) {
        return 0;
    }

    const unsigned char *k = key;
    unsigned long k0 = load32le(k + 0);
    unsigned long k1 = load32le(k + 4);

    unsigned long x = (unsigned long)decoded;
    x += k0;          x &= 0xffffffffU;
    x ^= x >> 17;
    x *= 0x9e485565U;
    x += k1;          x &= 0xffffffffU;
    x ^= x >> 16;
    x *= 0xef1d6b47U; x &= 0xffffffffU;
    x ^= x >> 16;

    ipv4_encode(s, x ^ k0 ^ k1);
    return 1;
}

// Decrypt a quad-dotted IPv4 address in place using format-preserving
// encryption. The key size is 8 bytes, and the buffer must have room for
// at least 16 bytes. Returns 1 on success or 0 if the input was invalid.
int ipv4_decrypt(char *s, const void *key)
{
    long long decoded = ipv4_decode(s);
    if (decoded < 0) {
        return 0;
    }

    const unsigned char *k = key;
    unsigned long k0 = load32le(k + 0);
    unsigned long k1 = load32le(k + 4);

    // Undo each encryption step in reverse: the multipliers are the modular
    // inverses of the encrypt constants, and x ^= x>>16 / x>>17 are their
    // own inverses for shifts of half the width or more.
    unsigned long x = (unsigned long)decoded ^ k0 ^ k1;
    x ^= x >> 16;
    x *= 0xeb00ce77U; x &= 0xffffffffU;
    x ^= x >> 16;
    x -= k1;
    x *= 0x88ccd46dU; x &= 0xffffffffU;
    x ^= x >> 17;
    x -= k0;

    ipv4_encode(s, x & 0xffffffffU);
    return 1;
}

#ifdef TEST
// Usage:
//   $ cc -DTEST -O3 -fopenmp -o ipv4_crypt ipv4_crypt.c
//   $ printf '%s\n' 127.0.0.1 10.0.0.1 | ./ipv4_crypt
#include <stdio.h>
#include <string.h>

int main(void)
{
    char line[32];
    unsigned char key[8] = {0xab, 0xfc, 0x0d, 0x86, 0xea, 0x47, 0x56, 0xc5};

    // Encrypt then decrypt every address read from standard input.
    while (fgets(line, sizeof(line), stdin)) {
        char *nl = strchr(line, '\n');
        if (nl) {
            *nl = 0;
        }
        if (!ipv4_encrypt(line, key)) {
            puts("INVALID");
            continue;
        }
        printf("%s\t", line);
        ipv4_decrypt(line, key);
        puts(line);
    }

    /* Test encode/decode */
    #pragma omp parallel for
    for (long long ip = 0; ip < 1LL<<32; ip++) {
        char want[16], got[16];
        sprintf(want, "%d.%d.%d.%d",
                (int)(ip >> 24 & 0xff), (int)(ip >> 16 & 0xff),
                (int)(ip >>  8 & 0xff), (int)(ip >>  0 & 0xff));
        ipv4_encode(got, ip);
        if (strcmp(want, got)) {
            printf("FAIL: (encode) %08llx, %s != %s\n", ip, want, got);
        }
        long long r = ipv4_decode(want);
        ipv4_encode(got, r);
        if (r != ip) {
            printf("FAIL: (decode) %08llx, %s != %s\n", ip, want, got);
        }
    }

    /* Test encrypt/decrypt */
    #pragma omp parallel for
    for (long long ip = 0; ip < 1LL<<32; ip++) {
        char want[16], got[16];
        ipv4_encode(want, ip);
        ipv4_encode(got, ip);
        ipv4_encrypt(got, key);
        ipv4_decrypt(got, key);
        if (strcmp(want, got)) {
            printf("FAIL: (encrypt) %08llx, %s != %s\n", ip, want, got);
        }
    }
}
#endif
draw.c
// // Created by sachetto on 11/11/17. // #include "draw.h" #include "../models_library/ten_tusscher_2006.h" #include <GL/freeglut.h> #define pi 3.14159265f #define rad ((pi)/180.0f) #define WIN_WIDTH 1024 #define WIN_HEIGTH 768 #define NUM_COLORS 257 static const double color[NUM_COLORS][3] = { {0.2298057,0.298717966,0.753683153}, {0.234299935,0.305559204,0.759874796}, {0.238810063,0.312388385,0.766005866}, {0.243336663,0.319205292,0.772075394}, {0.247880265,0.326009656,0.778082421}, {0.25244136,0.332801165,0.784026001}, {0.257020396,0.339579464,0.789905199}, {0.261617779,0.346344164,0.79571909}, {0.26623388,0.353094838,0.801466763}, {0.270869029,0.359831032,0.807147315}, {0.275523523,0.36655226,0.812759858}, {0.28019762,0.373258014,0.818303516}, {0.284891546,0.379947761,0.823777422}, {0.289605495,0.386620945,0.829180725}, {0.294339624,0.393276993,0.834512584}, {0.299094064,0.399915313,0.839772171}, {0.30386891,0.406535296,0.84495867}, {0.308664231,0.413136319,0.850071279}, {0.313480065,0.419717745,0.855109207}, {0.318316422,0.426278924,0.860071679}, {0.323173283,0.432819194,0.864957929}, {0.328050603,0.439337884,0.869767207}, {0.332948312,0.445834313,0.874498775}, {0.337866311,0.45230779,0.87915191}, {0.342804478,0.458757618,0.883725899}, {0.347762667,0.465183092,0.888220047}, {0.352740705,0.471583499,0.892633669}, {0.357738399,0.477958123,0.896966095}, {0.362755532,0.484306241,0.90121667}, {0.367791863,0.490627125,0.905384751}, {0.372847134,0.496920043,0.909469711}, {0.37792106,0.503184261,0.913470934}, {0.38301334,0.50941904,0.917387822}, {0.38812365,0.515623638,0.921219788}, {0.39325165,0.521797312,0.924966262}, {0.398396976,0.527939316,0.928626686}, {0.40355925,0.534048902,0.932200518}, {0.408738074,0.540125323,0.93568723}, {0.413933033,0.546167829,0.939086309}, {0.419143694,0.552175668,0.942397257}, {0.424369608,0.558148092,0.945619588}, {0.429610311,0.564084349,0.948752835}, {0.434865321,0.56998369,0.951796543}, {0.440134144,0.575845364,0.954750272}, 
{0.445416268,0.581668623,0.957613599}, {0.450711169,0.587452719,0.960386113}, {0.456018308,0.593196905,0.96306742}, {0.461337134,0.598900436,0.96565714}, {0.46666708,0.604562568,0.968154911}, {0.472007569,0.61018256,0.970560381}, {0.477358011,0.615759672,0.972873218}, {0.482717804,0.621293167,0.975093102}, {0.488086336,0.626782311,0.97721973}, {0.493462982,0.632226371,0.979252813}, {0.498847107,0.637624618,0.981192078}, {0.504238066,0.642976326,0.983037268}, {0.509635204,0.648280772,0.98478814}, {0.515037856,0.653537236,0.986444467}, {0.520445349,0.658745003,0.988006036}, {0.525857,0.66390336,0.989472652}, {0.531272118,0.669011598,0.990844132}, {0.536690004,0.674069012,0.99212031}, {0.542109949,0.679074903,0.993301037}, {0.54753124,0.684028574,0.994386177}, {0.552953156,0.688929332,0.995375608}, {0.558374965,0.693776492,0.996269227}, {0.563795935,0.698569369,0.997066945}, {0.569215322,0.703307287,0.997768685}, {0.574632379,0.707989572,0.99837439}, {0.580046354,0.712615557,0.998884016}, {0.585456486,0.717184578,0.999297533}, {0.590862011,0.721695979,0.999614929}, {0.596262162,0.726149107,0.999836203}, {0.601656165,0.730543315,0.999961374}, {0.607043242,0.734877964,0.999990472}, {0.61242261,0.739152418,0.999923544}, {0.617793485,0.743366047,0.999760652}, {0.623155076,0.747518228,0.999501871}, {0.628506592,0.751608345,0.999147293}, {0.633847237,0.755635786,0.998697024}, {0.639176211,0.759599947,0.998151185}, {0.644492714,0.763500228,0.99750991}, {0.649795942,0.767336039,0.996773351}, {0.655085089,0.771106793,0.995941671}, {0.660359348,0.774811913,0.995015049}, {0.665617908,0.778450826,0.993993679}, {0.670859959,0.782022968,0.992877768}, {0.676084688,0.78552778,0.991667539}, {0.681291281,0.788964712,0.990363227}, {0.686478925,0.792333219,0.988965083}, {0.691646803,0.795632765,0.987473371}, {0.696794099,0.798862821,0.985888369}, {0.701919999,0.802022864,0.984210369}, {0.707023684,0.805112381,0.982439677}, {0.712104339,0.808130864,0.980576612}, 
{0.717161148,0.811077814,0.978621507}, {0.722193294,0.813952739,0.976574709}, {0.727199962,0.816755156,0.974436577}, {0.732180337,0.81948459,0.972207484}, {0.737133606,0.82214057,0.969887816}, {0.742058956,0.824722639,0.967477972}, {0.746955574,0.827230344,0.964978364}, {0.751822652,0.829663241,0.962389418}, {0.756659379,0.832020895,0.959711569}, {0.761464949,0.834302879,0.956945269}, {0.766238556,0.836508774,0.95409098}, {0.770979397,0.838638169,0.951149176}, {0.775686671,0.840690662,0.948120345}, {0.780359577,0.842665861,0.945004985}, {0.78499732,0.84456338,0.941803607}, {0.789599105,0.846382843,0.938516733}, {0.79416414,0.848123884,0.935144898}, {0.798691636,0.849786142,0.931688648}, {0.803180808,0.85136927,0.928148539}, {0.807630872,0.852872925,0.92452514}, {0.812041048,0.854296776,0.92081903}, {0.81641056,0.855640499,0.917030798}, {0.820738635,0.856903782,0.913161047}, {0.825024503,0.85808632,0.909210387}, {0.829267397,0.859187816,0.90517944}, {0.833466556,0.860207984,0.901068838}, {0.837621221,0.861146547,0.896879224}, {0.841730637,0.862003236,0.892611249}, {0.845794055,0.862777795,0.888265576}, {0.849810727,0.863469972,0.883842876}, {0.853779913,0.864079527,0.87934383}, {0.857700874,0.864606232,0.874769128}, {0.861572878,0.865049863,0.870119469}, {0.865395197,0.86541021,0.865395561}, {0.86977749,0.863633958,0.859948576}, {0.874064226,0.861776352,0.854466231}, {0.878255583,0.859837644,0.848949435}, {0.882351728,0.857818097,0.843399101}, {0.886352818,0.85571798,0.837816138}, {0.890259,0.853537573,0.832201453}, {0.89407041,0.851277164,0.826555954}, {0.897787179,0.848937047,0.820880546}, {0.901409427,0.846517528,0.815176131}, {0.904937269,0.844018919,0.809443611}, {0.908370816,0.841441541,0.803683885}, {0.911710171,0.838785722,0.79789785}, {0.914955433,0.836051799,0.792086401}, {0.918106696,0.833240115,0.786250429}, {0.921164054,0.830351023,0.780390824}, {0.924127593,0.827384882,0.774508472}, {0.926997401,0.824342058,0.768604257}, 
{0.929773562,0.821222926,0.76267906}, {0.932456159,0.818027865,0.756733758}, {0.935045272,0.814757264,0.750769226}, {0.937540984,0.811411517,0.744786333}, {0.939943375,0.807991025,0.738785947}, {0.942252526,0.804496196,0.732768931}, {0.944468518,0.800927443,0.726736146}, {0.946591434,0.797285187,0.720688446}, {0.948621357,0.793569853,0.714626683}, {0.950558373,0.789781872,0.708551706}, {0.952402567,0.785921682,0.702464356}, {0.954154029,0.781989725,0.696365473}, {0.955812849,0.777986449,0.690255891}, {0.957379123,0.773912305,0.68413644}, {0.958852946,0.769767752,0.678007945}, {0.960234418,0.765553251,0.671871226}, {0.961523642,0.761269267,0.665727098}, {0.962720725,0.756916272,0.659576372}, {0.963825777,0.752494738,0.653419853}, {0.964838913,0.748005143,0.647258341}, {0.965760251,0.743447967,0.64109263}, {0.966589914,0.738823693,0.634923509}, {0.96732803,0.734132809,0.628751763}, {0.967974729,0.729375802,0.62257817}, {0.96853015,0.724553162,0.616403502}, {0.968994435,0.719665383,0.610228525}, {0.969367729,0.714712956,0.604054002}, {0.969650186,0.709696378,0.597880686}, {0.969841963,0.704616143,0.591709328}, {0.969943224,0.699472746,0.585540669}, {0.969954137,0.694266682,0.579375448}, {0.969874878,0.688998447,0.573214394}, {0.969705626,0.683668532,0.567058232}, {0.96944657,0.678277431,0.560907681}, {0.969097901,0.672825633,0.554763452}, {0.968659818,0.667313624,0.54862625}, {0.968132528,0.661741889,0.542496774}, {0.967516241,0.656110908,0.536375716}, {0.966811177,0.650421156,0.530263762}, {0.966017559,0.644673104,0.524161591}, {0.965135621,0.638867216,0.518069875}, {0.964165599,0.63300395,0.511989279}, {0.963107739,0.627083758,0.505920462}, {0.961962293,0.621107082,0.499864075}, {0.960729521,0.615074355,0.493820764}, {0.959409687,0.608986,0.487791167}, {0.958003065,0.602842431,0.481775914}, {0.956509936,0.596644046,0.475775629}, {0.954930586,0.590391232,0.46979093}, {0.95326531,0.584084361,0.463822426}, {0.951514411,0.57772379,0.457870719}, 
{0.949678196,0.571309856,0.451936407}, {0.947756983,0.564842879,0.446020077}, {0.945751096,0.558323158,0.440122312}, {0.943660866,0.551750968,0.434243684}, {0.941486631,0.545126562,0.428384763}, {0.939228739,0.538450165,0.422546107}, {0.936887543,0.531721972,0.41672827}, {0.934463404,0.524942147,0.410931798}, {0.931956691,0.518110821,0.40515723}, {0.929367782,0.511228087,0.399405096}, {0.92669706,0.504293997,0.393675922}, {0.923944917,0.49730856,0.387970225}, {0.921111753,0.490271735,0.382288516}, {0.918197974,0.483183431,0.376631297}, {0.915203996,0.476043498,0.370999065}, {0.912130241,0.468851724,0.36539231}, {0.908977139,0.461607831,0.359811513}, {0.905745128,0.454311462,0.354257151}, {0.902434654,0.446962183,0.348729691}, {0.89904617,0.439559467,0.343229596}, {0.895580136,0.43210269,0.33775732}, {0.892037022,0.424591118,0.332313313}, {0.888417303,0.417023898,0.326898016}, {0.884721464,0.409400045,0.321511863}, {0.880949996,0.401718425,0.316155284}, {0.877103399,0.393977745,0.310828702}, {0.873182178,0.386176527,0.305532531}, {0.869186849,0.378313092,0.300267182}, {0.865117934,0.370385535,0.295033059}, {0.860975962,0.362391695,0.289830559}, {0.85676147,0.354329127,0.284660075}, {0.852475004,0.346195061,0.279521991}, {0.848117114,0.337986361,0.27441669}, {0.843688361,0.329699471,0.269344545}, {0.839189312,0.32133036,0.264305927}, {0.834620542,0.312874446,0.259301199}, {0.829982631,0.304326513,0.254330723}, {0.82527617,0.295680611,0.249394851}, {0.820501754,0.286929926,0.244493934}, {0.815659988,0.278066636,0.239628318}, {0.810751482,0.269081721,0.234798343}, {0.805776855,0.259964733,0.230004348}, {0.800736732,0.250703507,0.225246666}, {0.795631745,0.24128379,0.220525627}, {0.790462533,0.231688768,0.215841558}, {0.785229744,0.221898442,0.211194782}, {0.779934029,0.211888813,0.20658562}, {0.774576051,0.201630762,0.202014392}, {0.769156474,0.191088518,0.197481414}, {0.763675975,0.180217488,0.192987001}, {0.758135232,0.168961101,0.188531467}, 
{0.752534934,0.157246067,0.184115123}, {0.746875773,0.144974956,0.179738284}, {0.741158452,0.132014017,0.175401259}, {0.735383675,0.1181719,0.171104363}, {0.729552157,0.103159409,0.166847907}, {0.723664618,0.086504694,0.162632207}, {0.717721782,0.067344036,0.158457578}, {0.711724383,0.043755173,0.154324339}, {0.705673158,0.01555616,0.150232812}}; GLdouble * get_color(double value) { // #define NUM_COLORS 4 // static float color[NUM_COLORS][3] = { {0,0,1}, {0,1,0}, {1,1,0}, {1,0,0} }; int idx1; // |-- Our desired color will be between these two indexes in "color". int idx2; // | double fractBetween = 0; // Fraction between "idx1" and "idx2" where our value is. if(value <= 0) { idx1 = idx2 = 0; } // accounts for an input <=0 else if(value >= 1) { idx1 = idx2 = NUM_COLORS-1; } // accounts for an input >=0 else { value = value * (NUM_COLORS-1); // Will multiply value by 3. idx1 = (int)floor(value); // Our desired color will be after this index. idx2 = idx1+1; // ... and before this index (inclusive). fractBetween = value - (double)idx1; // Distance between the two indexes (0-1). 
    }
    // Linearly interpolate each channel between the two palette entries.
    double red   = (color[idx2][0] - color[idx1][0])*fractBetween + color[idx1][0];
    double green = (color[idx2][1] - color[idx1][1])*fractBetween + color[idx1][1];
    double blue  = (color[idx2][2] - color[idx1][2])*fractBetween + color[idx1][2];

    // Heap-allocated RGBA quadruple; the CALLER owns it and must free() it.
    // NOTE(review): malloc is unchecked — a failure would dereference NULL.
    GLdouble *result = (GLdouble *) malloc(sizeof(GLdouble) * 4);

    result[0] = red;
    result[1] = green;
    result[2] = blue;
    result[3] = 1.0;

    return result;
}

// Draw an axis-aligned cube centered at (x, y, z) with half-edge length
// face_size, colored by mapping the membrane potential v through the
// palette (v is normalized from [INITIAL_V, 40] mV to [0, 1]).
static void draw_cube(double x, double y, double z, double face_size, double v) {
    glPushMatrix();
    glTranslated(x, y, z);
    glBegin(GL_QUADS);

    // Local `color` shadows the file-scope palette table of the same name.
    GLdouble *color;
    v = (v - INITIAL_V)/(40.0-INITIAL_V);
    color = get_color(v);
    glColor4dv(color);
    free(color);

    // -Z face
    glNormal3d(0.0f, 0.0f, -1.0f);
    glVertex3d(face_size, -face_size, -face_size);
    glVertex3d(-face_size, -face_size, -face_size);
    glVertex3d(-face_size, face_size, -face_size);
    glVertex3d(face_size, face_size, -face_size);

    // +Z face
    glNormal3d(0.0f, 0.0f, 1.0f);
    glVertex3d(-face_size, -face_size, face_size);
    glVertex3d(face_size, -face_size, face_size);
    glVertex3d(face_size, face_size, face_size);
    glVertex3d(-face_size, face_size, face_size);

    // +X face
    glNormal3d(1.0f, 0.0f, 0.0f);
    glVertex3d(face_size, -face_size, face_size);
    glVertex3d(face_size, -face_size, -face_size);
    glVertex3d(face_size, face_size, -face_size);
    glVertex3d(face_size, face_size, face_size);

    // -X face
    glNormal3d(-1.0f, 0.0f, 0.0f);
    glVertex3d(-face_size, -face_size, -face_size);
    glVertex3d(-face_size, -face_size, face_size);
    glVertex3d(-face_size, face_size, face_size);
    glVertex3d(-face_size, face_size, -face_size);

    // -Y face
    glNormal3d(0.0f, -1.0f, 0.0f);
    glVertex3d(-face_size, -face_size, -face_size);
    glVertex3d(face_size, -face_size, -face_size);
    glVertex3d(face_size, -face_size, face_size);
    glVertex3d(-face_size, -face_size, face_size);

    // +Y face
    glNormal3d(0.0f, 1.0f, 0.0f);
    glVertex3d(face_size, face_size, -face_size);
    glVertex3d(-face_size, face_size, -face_size);
    glVertex3d(-face_size, face_size, face_size);
    glVertex3d(face_size, face_size, face_size);

    glEnd();
    glPopMatrix();
}

// observer (camera position)
float eyeX;
float eyeY;
float eyeZ; // object center float centerX; float centerY; float centerZ; // vector target float xTarget; float yTarget; float zTarget; // Variables used to navigate through the world. float step; float upDownAngle; float leftRigthAngle; bool look_around; bool walk; float window_h, window_w; //Variables used by perspective projection. float visionAngle; float aspect; float Near; float Far; //Variables which are used to rotate drawings and to make demostration. float xAngle; float yAngle; float zAngle; float mousePreviousX; float mousePreviousY; int mouseButton; static void init_variables() { // Sets the vision angle of the camera. visionAngle = 45; // Sets the initial observer's coordinates. It corresponds to position the // the camera initialy. eyeX = 3; eyeY = 0.5; eyeZ = 0.5; // Sets the initial coordinates of the target point. centerX = 0.5; centerY = 0.5; centerZ = 0.5; // Sets the camera to not move. walk = false; look_around = false; Near = 0.1; Far = 1000; // Sets all drawing rotation angles. xAngle = 90; yAngle = 90; zAngle = 0; // Sets the step walked by the obsever and the rotation angle of the camera. step = 0.04; leftRigthAngle = -90; upDownAngle = 0; glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); } static void moving_mouse( int x, int y ) { const float sensibility = 0.333; if( mouseButton == GLUT_LEFT_BUTTON ) { zAngle += (x - mousePreviousX) * sensibility; yAngle += (y - mousePreviousY) * sensibility; } mousePreviousX = x; mousePreviousY = y; glutPostRedisplay(); } void keyboard( unsigned char key, int x, int y ) { switch(key) { // Amplification case '=': case '+': { visionAngle -= 2; if( visionAngle < 0.01 ) visionAngle = 0.01; // minimum vision angle. break; } // shrink case '-': case '_': { visionAngle += 2; if( visionAngle > 130 ) visionAngle = 130; // max vision angle. 
break; } case 'a': case 'A': { zAngle += 5; break; } case 'q': case 'Q': { zAngle -= 5; break; } case 'd': case 'D': { yAngle += 5; break; } case 'e': case 'E': { yAngle -= 5; break; } case 'f': case 'F': { glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); break; } case 'l': case 'L': { glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); break; } case 'p': case 'P': { glPolygonMode(GL_FRONT_AND_BACK, GL_POINT); break; } case 'r': case 'R': { init_variables(); break; } default: { break; } } glutPostRedisplay(); } void initialize_lighting( void ) { // Ambient light. float light_ambient[4] = { 0.2, 0.2, 0.2, 1.0 }; float light_diffuse[4] = { 0.7, 0.7, 0.7, 1.0 }; float light_specular[4] = { 1.0, 1.0, 1.0, 1.0 }; float light_position[4] = { 10.0, 50.0, 40.0, 1.0 }; // Shininess float specularity[4] = { 0.8, 0.8, 0.8, 1.0 }; int shininess = 60; // Sets background color to white. glClearColor ( 1.0, 1.0, 1.0, 0); glShadeModel( GL_SMOOTH ); // Enables lighting. glEnable( GL_LIGHTING ); // Enables the light ambient. glLightModelfv( GL_LIGHT_MODEL_AMBIENT, light_ambient ); // Sets the light0 parameters. glLightfv( GL_LIGHT0, GL_AMBIENT, light_ambient ); glLightfv( GL_LIGHT0, GL_DIFFUSE, light_diffuse ); glLightfv( GL_LIGHT0, GL_SPECULAR, light_specular ); glLightfv( GL_LIGHT0, GL_POSITION, light_position ); // Enables the definition of the material color from the current color. glEnable( GL_COLOR_MATERIAL ); // Define how a material reflexes light. glMaterialfv( GL_FRONT_AND_BACK, GL_SPECULAR, specularity ); glMateriali( GL_FRONT_AND_BACK, GL_SHININESS, shininess ); // Turns on the light0. glEnable( GL_LIGHT0 ); // Enables the depth-buffering. 
glEnable( GL_DEPTH_TEST ); } static void vision( void ) { glMatrixMode( GL_PROJECTION ); glLoadIdentity(); int w = glutGet( GLUT_WINDOW_WIDTH ); int h = glutGet( GLUT_WINDOW_HEIGHT ); aspect = w/h; gluPerspective( visionAngle, aspect, Near, Far ); // glMatrixMode( GL_MODELVIEW ); glLoadIdentity(); gluLookAt( eyeX, eyeY, eyeZ, centerX, centerY, centerZ, 0, 0, 1 ); //glTranslatef( 0.5, 0.5, 0.5 ); glRotatef( xAngle, 1, 0, 0 ); glRotatef( yAngle, 0, 1, 0 ); glRotatef( zAngle, 0, 0, 1 ); //glTranslatef( -0.5f, -0.5f, -0.5f ); //TODO: we need to calculate the scale based on the grid size glScaled(2, 2, 2); } static void reshape( GLsizei w, GLsizei h ) { // Only to avoid division by zero. if ( h == 0 ) h = 1; // Specifies the viewport size. glViewport(0, 0, w, h); // Corrects the aspect of the window. aspect = (GLfloat)w / (GLfloat)h; vision(); } static void navigate( void ) { // Vector that points towards the place where the observer looks at. xTarget = centerX - eyeX; yTarget = centerY - eyeY; zTarget = centerZ - eyeZ; // Size of the target vector. float targetSize = sqrtf( xTarget*xTarget + yTarget*yTarget + zTarget*zTarget ); // Walking through the world. if (walk) { // Unitary target vector. float xUnitaryTarget = xTarget / targetSize; float yUnitaryTarget = yTarget / targetSize; float zUnitaryTarget = zTarget / targetSize; // Updates the position of the target. centerX = centerX + ( step * xUnitaryTarget ); centerY = centerY + ( step * yUnitaryTarget ); centerZ = centerZ + ( step * zUnitaryTarget ); // Updates the observer's position. eyeX = eyeX + ( step * xUnitaryTarget ); eyeY = eyeY + ( step * yUnitaryTarget ); eyeZ = eyeZ + ( step * zUnitaryTarget ); } // Looking around through the world. if( look_around ) { // Rotates the vector target at origin of the coordinates system. 
xTarget = targetSize * cosf(rad * upDownAngle) * sinf(rad * leftRigthAngle); yTarget = targetSize * cosf(rad * upDownAngle) * cosf(rad * leftRigthAngle); zTarget = targetSize * sinf(rad * upDownAngle); // Translates the vector target from the origin of the system of coordinates // to its previous position. centerX = xTarget + eyeX; centerY = yTarget + eyeY; centerZ = zTarget + eyeZ; } } static void special_keys( int key, int x, int y ) { // Goes ahead in the direction pointed by the camera. if( key == GLUT_KEY_UP ) { look_around = true; upDownAngle -= 1; navigate(); look_around = false; } // Comes back from the direction pointed by the camera. if( key == GLUT_KEY_DOWN ) { look_around = true; upDownAngle += 1; navigate(); look_around = false; } // Turns the camera to its right side. if( key == GLUT_KEY_RIGHT ) { look_around = true; leftRigthAngle -= 1; navigate(); look_around = false; } // Turns the camera to its left side. if( key == GLUT_KEY_LEFT ) { look_around = true; leftRigthAngle += 1; navigate(); look_around = false; } // Turns the camera downward. if( key == GLUT_KEY_PAGE_DOWN ) { walk = true; step = -1 * fabsf( step ); navigate(); walk = false; } // Turns the camera upward. if( key == GLUT_KEY_PAGE_UP ) { walk = true; step = fabsf( step ); navigate(); walk = false; } // Full screen. 
if( key == GLUT_KEY_F1 ) { glutFullScreen(); } // Default screen if( key == GLUT_KEY_F2 ) { glutReshapeWindow( WIN_WIDTH, WIN_HEIGTH ); glutPositionWindow( 10, 30 ); } glutPostRedisplay(); } static void display() { glClearColor(0, 0, 0, 1); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); vision(); if (grid_to_draw) { double size = grid_to_draw->side_length; uint32_t n_active = grid_to_draw->num_active_cells; struct cell_node **ac = grid_to_draw->active_cells; struct cell_node *grid_cell; if (ac) { //#pragma omp parallel for for (int i = 0; i < n_active; i++) { grid_cell = ac[i]; if (grid_cell->active) { draw_cube(grid_cell->center_x / size, grid_cell->center_y / size, grid_cell->center_z / size, grid_cell->half_face_length / size, grid_cell->v); } } } } glutSwapBuffers(); } static void timer( int parameter ) { if(redraw) display(); glutTimerFunc( 20, timer, 0 ); } void stop_drawing() { } void init_opengl(int argc, char **argv) { glutInit( &argc, argv ); init_variables(); glutInitDisplayMode( GLUT_RGBA | GLUT_DEPTH | GLUT_DOUBLE ); glutInitWindowSize( WIN_WIDTH, WIN_HEIGTH ); glutCreateWindow( "GLUT" ); glutWMCloseFunc(stop_drawing); glEnable (GL_BLEND); glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); initialize_lighting(); glutDisplayFunc( display ); glutReshapeFunc( reshape ); glutKeyboardFunc(keyboard); glutSpecialFunc( special_keys ); glutMotionFunc( moving_mouse ); glutTimerFunc( 20, timer, 20 ); glEnable( GL_DEPTH_TEST ); glutSetOption( GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_GLUTMAINLOOP_RETURNS ); glutMainLoop(); }
rotLibIDLLoopsDbl.c
#include "rotLibIDLLoopsDbl.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#define OPENMPLIMIT 100000l
#endif
#include "rotationLibCDbl.h"

/* Batch drivers for the rotation-representation conversions in
 * rotationLibCDbl.h. Each xx2yyLoop() applies the scalar converter xx2yy()
 * to `n` consecutive rotations stored in flat arrays, optionally in parallel
 * with OpenMP when n >= OPENMPLIMIT. All 42 loops are identical except for
 * the converter called and the per-element strides, so they are generated
 * from one macro instead of 42 hand-copied bodies.
 *
 * Components per representation (stride in doubles):
 *   ax (axis-angle)         4      om (orientation matrix) 9
 *   cu (cubochoric)         3      qu (quaternion)         4
 *   eu (Euler angles)       3      ro (Rodrigues)          4
 *   ho (homochoric)         3
 */

#ifdef _OPENMP
/* Use every core for large batches, a single thread for small ones
 * (thread start-up would dominate). NOTE: this changes the process-wide
 * OpenMP thread count as a side effect, as the original code did. */
static void set_loop_threads(unsigned long long n) {
    if (n >= OPENMPLIMIT) {
        omp_set_num_threads(omp_get_num_procs());
    } else {
        omp_set_num_threads(1);
    }
}
#else
#define set_loop_threads(n) ((void)0)
#endif

/* Defines `int name(double *in, double *out, unsigned long long n, int p)`:
 * converts n rotations with `conv`, reading inStep doubles per element from
 * `in` and writing outStep doubles per element to `out`. `p` is forwarded to
 * the converter; the converter's status is discarded (as before). Always
 * returns 1. */
#define DEFINE_CONV_LOOP(name, conv, inStep, outStep)                          \
int name(double *in, double *out, unsigned long long n, int p) {               \
    unsigned long long i;                                                      \
    set_loop_threads(n);                                                       \
    _Pragma("omp parallel for default(shared) private(i) schedule(static)")    \
    for (i = 0; i < n; i++) {                                                  \
        (void)conv(&in[i * (inStep)], &out[i * (outStep)], p);                 \
    }                                                                          \
    return 1;                                                                  \
}

/* From axis-angle (4). */
DEFINE_CONV_LOOP(ax2cuLoop, ax2cu, 4, 3)
DEFINE_CONV_LOOP(ax2euLoop, ax2eu, 4, 3)
DEFINE_CONV_LOOP(ax2hoLoop, ax2ho, 4, 3)
DEFINE_CONV_LOOP(ax2omLoop, ax2om, 4, 9)
DEFINE_CONV_LOOP(ax2quLoop, ax2qu, 4, 4)
DEFINE_CONV_LOOP(ax2roLoop, ax2ro, 4, 4)

/* From cubochoric (3). */
DEFINE_CONV_LOOP(cu2axLoop, cu2ax, 3, 4)
DEFINE_CONV_LOOP(cu2euLoop, cu2eu, 3, 3)
DEFINE_CONV_LOOP(cu2hoLoop, cu2ho, 3, 3)
DEFINE_CONV_LOOP(cu2omLoop, cu2om, 3, 9)
DEFINE_CONV_LOOP(cu2quLoop, cu2qu, 3, 4)
DEFINE_CONV_LOOP(cu2roLoop, cu2ro, 3, 4)

/* From Euler angles (3). */
DEFINE_CONV_LOOP(eu2axLoop, eu2ax, 3, 4)
DEFINE_CONV_LOOP(eu2cuLoop, eu2cu, 3, 3)
DEFINE_CONV_LOOP(eu2hoLoop, eu2ho, 3, 3)
DEFINE_CONV_LOOP(eu2omLoop, eu2om, 3, 9)
DEFINE_CONV_LOOP(eu2quLoop, eu2qu, 3, 4)
DEFINE_CONV_LOOP(eu2roLoop, eu2ro, 3, 4)

/* From homochoric (3). */
DEFINE_CONV_LOOP(ho2axLoop, ho2ax, 3, 4)
DEFINE_CONV_LOOP(ho2cuLoop, ho2cu, 3, 3)
DEFINE_CONV_LOOP(ho2euLoop, ho2eu, 3, 3)
DEFINE_CONV_LOOP(ho2omLoop, ho2om, 3, 9)
DEFINE_CONV_LOOP(ho2quLoop, ho2qu, 3, 4)
DEFINE_CONV_LOOP(ho2roLoop, ho2ro, 3, 4)

/* From orientation matrix (9). */
DEFINE_CONV_LOOP(om2axLoop, om2ax, 9, 4)
DEFINE_CONV_LOOP(om2cuLoop, om2cu, 9, 3)
DEFINE_CONV_LOOP(om2euLoop, om2eu, 9, 3)
DEFINE_CONV_LOOP(om2hoLoop, om2ho, 9, 3)
DEFINE_CONV_LOOP(om2quLoop, om2qu, 9, 4)
DEFINE_CONV_LOOP(om2roLoop, om2ro, 9, 4)

/* From quaternion (4). */
DEFINE_CONV_LOOP(qu2axLoop, qu2ax, 4, 4)
DEFINE_CONV_LOOP(qu2cuLoop, qu2cu, 4, 3)
DEFINE_CONV_LOOP(qu2euLoop, qu2eu, 4, 3)
DEFINE_CONV_LOOP(qu2hoLoop, qu2ho, 4, 3)
DEFINE_CONV_LOOP(qu2omLoop, qu2om, 4, 9)
DEFINE_CONV_LOOP(qu2roLoop, qu2ro, 4, 4)

/* From Rodrigues (4). */
DEFINE_CONV_LOOP(ro2axLoop, ro2ax, 4, 4)
DEFINE_CONV_LOOP(ro2cuLoop, ro2cu, 4, 3)
DEFINE_CONV_LOOP(ro2euLoop, ro2eu, 4, 3)
DEFINE_CONV_LOOP(ro2hoLoop, ro2ho, 4, 3)
DEFINE_CONV_LOOP(ro2omLoop, ro2om, 4, 9)
DEFINE_CONV_LOOP(ro2quLoop, ro2qu, 4, 4)
sudoku_estatico.c
#include <stdio.h> #include <stdlib.h> #include "ctimer.h" #include "sudoku.h" #include <omp.h> int main( int argc, char *argv[] ) { int sol[81]; if( argc < 2 ) { printf("Usage: %s profundidad\n",argv[0]); exit(1); } int profundidad; sscanf(argv[1],"%d",&profundidad); printf("sudoku inicial: \n"); //init_sudoku("normal",sol); init_sudoku("muy dificil",sol); prin_sudoku(sol); double tiempo, ucpu, scpu; ctimer( &tiempo, &ucpu, &scpu ); int A[3000][81]; int B[3000][81]; int nivel, nodo, k, l; for(int l = 0; l < 81; l++) A[0][l] = sol[l]; int tableros = 1; for( int nivel = 0; nivel < profundidad; nivel ++){ int j = 0; for( int nodo = 0; nodo < tableros; nodo++ ) { int k = 0; while( k < 81 && A[nodo][k] != 0 ) k++; if( k<81 ) { for( int i=1; i<=9; i++ ) { A[nodo][k] = i; if( es_factible( k/9+1, k%9+1, A[nodo] ) ) { for( int l = 0; l<81; l++ ) { B[j][l] = A[nodo][l]; } j++; } A[nodo][k] = 0; } } } tableros = j; for( int i = 0; i<tableros; i++ ) for( int k = 0; k<81; k++ ) A[i][k] = B[i][k]; } printf("Tableros = %d\n",tableros); #pragma omp parallel for schedule(runtime) for(int tablero = 0; tablero < tableros; tablero++) { int mascara[81]; for ( int i = 0; i < 81; i++ ) mascara[i] = A[tablero][i] != 0; sudoku_sol(1,1,A[tablero],mascara); } ctimer( &tiempo, &ucpu, &scpu ); printf("profundidad: %d Tiempo = %f\n",profundidad,tiempo); return 0; } void sudoku_sol( int i, int j, int sol[81], int mascara[81] ) { int k; if( mascara(i, j) == 0 ) { for( k = 1; k <= 9; k++ ) { sol( i, j ) = k; if( es_factible( i, j, sol ) ) { if( i == 9 && j == 9 ) { printf("Solucion: \n"); prin_sudoku(sol); } if( i < 9 && j == 9 ) { sudoku_sol ( i+1, 1, sol, mascara ); } if( i <= 9 && j < 9 ) { sudoku_sol( i, j+1, sol, mascara ); } } } sol(i, j) = 0; } else { if( i == 9 && j == 9 ) { printf("Solucion: \n"); prin_sudoku(sol); } if( i < 9 && j == 9 ) { sudoku_sol ( i+1, 1, sol, mascara ); } if( i <= 9 && j < 9 ) { sudoku_sol( i , j+1, sol, mascara ); } } }