Dataset columns: source (string, lengths 3 to 92) and c (string, lengths 26 to 2.25M).
distribute_parallel_for_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute parallel for simd'}} #pragma omp distribute parallel for simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute parallel for simd'}} #pragma omp distribute parallel for simd foo void test_no_clause() { int i; #pragma omp distribute parallel for simd for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp distribute parallel for simd' must be a for loop}} #pragma omp distribute parallel for simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} #pragma omp distribute parallel for simd foo bar for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} #pragma omp distribute parallel for simd; for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} #pragma omp distribute parallel for simd linear(x); for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} #pragma omp distribute parallel for simd private(x); for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} #pragma omp distribute parallel for simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_safelen() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd safelen for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd safelen() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd 
safelen 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4 for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd safelen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd safelen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4, 8) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute parallel for simd safelen(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute parallel for simd safelen(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd safelen(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd safelen(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd safelen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_simdlen() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd simdlen for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd simdlen() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(, ) for (i = 0; i < 16; ++i) ; #pragma omp target 
#pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd simdlen 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4 for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd simdlen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd simdlen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4, 8) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute parallel for simd simdlen(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute parallel for simd simdlen(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd simdlen(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd simdlen(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd simdlen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_collapse() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd collapse for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd collapse( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd collapse() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd collapse(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // 
expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd collapse(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd collapse 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute parallel for simd collapse(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute parallel for simd collapse(foo()) for (i = 0; i < 16; ++i) ; #pragma omp 
target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd collapse(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd collapse(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd collapse(2) for (i = 0; i < 16; ++i) for (int j = 0; j < 16; ++j) // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp distribute parallel for simd reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_linear() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd linear( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd linear(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd linear(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd linear() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd linear(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute parallel for simd linear(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute parallel for simd linear(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute parallel for simd linear(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute parallel for simd linear(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd linear(x :) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd linear(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd linear(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd linear(x : 2 * 2) for (i = 0; i < 16; ++i) ; #pragma omp 
target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd linear(x : 1, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd linear(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be linear}} #pragma omp distribute parallel for simd linear(x) linear(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+2 {{defined as private}} // expected-error@+1 {{private variable cannot be linear}} #pragma omp distribute parallel for simd private(x) linear(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be private}} #pragma omp distribute parallel for simd linear(x) private(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}} #pragma omp distribute parallel for simd linear(x, y : 0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be lastprivate}} #pragma omp distribute parallel for simd linear(x) lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+2 {{defined as lastprivate}} // expected-error@+1 {{lastprivate variable cannot be linear}} #pragma omp distribute parallel for simd lastprivate(x) linear(x) for (i = 0; i < 16; ++i) ; } void test_aligned() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd aligned( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd aligned(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd aligned(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd aligned() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd aligned(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute parallel for simd aligned(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute parallel for simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute parallel for simd aligned(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // 
expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute parallel for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; int *x, y, z[25]; // expected-note 4 {{'y' defined here}} #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd aligned(z) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd aligned(x :) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd aligned(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd aligned(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd aligned(x : 2 * 2) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd aligned(x : 1, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd aligned(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute parallel for simd aligned(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute parallel for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+2 {{defined as aligned}} // expected-error@+1 {{a variable cannot appear in more than one aligned clause}} #pragma omp distribute parallel for simd aligned(x) aligned(z, x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+3 {{defined as aligned}} // expected-error@+2 {{a variable cannot appear in more than one aligned clause}} // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute parallel for simd aligned(x, y, z) aligned(y, z) for (i = 0; i < 16; ++i) ; } void test_private() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd private( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd private(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd private(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd private() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd private(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // 
expected-error@+1 {{expected variable name}} #pragma omp distribute parallel for simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_lastprivate() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd lastprivate( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd lastprivate(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd lastprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd lastprivate() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd lastprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute parallel for simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_firstprivate() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd firstprivate( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd firstprivate(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd firstprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd firstprivate() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd firstprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute parallel for simd firstprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x) firstprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x, y) firstprivate(x, y) for (i = 0; i < 16; 
++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x, y, z) firstprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; #pragma omp target #pragma omp teams // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp distribute parallel for simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } #pragma omp target #pragma omp teams // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp distribute parallel for simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } }
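For contrast with the diagnostic cases exercised above, a minimal well-formed use of the combined directive is sketched below. It is assembled from the forms the test accepts without complaint (a loop nest matching collapse, a strictly positive constant simdlen) and is not part of the test file itself.

void valid_use_sketch(void) {
  int a[16][16];
#pragma omp target
#pragma omp teams
// two nested canonical loops satisfy collapse(2); simdlen(4) is a strictly
// positive integer constant, so no diagnostics are expected here
#pragma omp distribute parallel for simd collapse(2) simdlen(4)
  for (int i = 0; i < 16; ++i)
    for (int j = 0; j < 16; ++j)
      a[i][j] = i + j;
}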
geeksforgeeks.c
// OpenMP header
#include <omp.h>

#include <stdio.h>
#include <stdlib.h>

int main(int argc, char* argv[])
{
    int nthreads, tid;

    // Begin of parallel region
    #pragma omp parallel private(nthreads, tid)
    {
        // Getting thread number
        tid = omp_get_thread_num();
        printf("Welcome to GFG from thread = %d\n", tid);

        if (tid == 0) {
            // Only master thread does this
            nthreads = omp_get_num_threads();
            printf("Number of threads = %d\n", nthreads);
        }
    }
}
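A companion sketch (not part of the dataset entry) that fixes the team size with omp_set_num_threads before the parallel region; the requested count of 4 and the output format are illustrative assumptions.

#include <omp.h>
#include <stdio.h>

int main(void) {
    omp_set_num_threads(4);              // request a team of 4 threads
#pragma omp parallel
    {
        int tid = omp_get_thread_num();  // this thread's id within the team
#pragma omp critical
        printf("thread %d of %d\n", tid, omp_get_num_threads());
    }
    return 0;
}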
GB_binop__lor_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lor_int16) // A.*B function (eWiseMult): GB (_AemultB_08__lor_int16) // A.*B function (eWiseMult): GB (_AemultB_02__lor_int16) // A.*B function (eWiseMult): GB (_AemultB_04__lor_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_int16) // A*D function (colscale): GB (_AxD__lor_int16) // D*A function (rowscale): GB (_DxB__lor_int16) // C+=B function (dense accum): GB (_Cdense_accumB__lor_int16) // C+=b function (dense accum): GB (_Cdense_accumb__lor_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_int16) // C=scalar+B GB (_bind1st__lor_int16) // C=scalar+B' GB (_bind1st_tran__lor_int16) // C=A+scalar GB (_bind2nd__lor_int16) // C=A'+scalar GB (_bind2nd_tran__lor_int16) // C type: int16_t // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 0 // BinaryOp: cij = ((aij != 0) || (bij != 0)) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ((x != 0) || (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOR || GxB_NO_INT16 || GxB_NO_LOR_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__lor_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lor_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lor_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lor_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lor_int16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lor_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) alpha_scalar_in)) ; beta_scalar = 
(*((int16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__lor_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lor_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__lor_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lor_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lor_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = ((x != 0) || (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lor_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = ((aij != 0) || (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((x != 0) || (aij != 0)) ; \ } GrB_Info GB (_bind1st_tran__lor_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t 
*restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((aij != 0) || (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__lor_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
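Stripped of the GraphBLAS matrix machinery, the bind1st kernel above is an elementwise loop applying the scalar on the left of the LOR operator. A standalone sketch (plain arrays, every entry assumed present, so no Bb bitmap test) of that pattern:

#include <stdint.h>

// Simplified sketch of GB (_bind1st__lor_int16): Cx [p] = (x || Bx [p]).
static void lor_bind1st_sketch (int16_t *Cx, int16_t x, const int16_t *Bx,
                                int64_t bnz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        int16_t bij = Bx [p] ;
        Cx [p] = ((x != 0) || (bij != 0)) ;
    }
}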
GB_unaryop__abs_uint32_int16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__abs_uint32_int16
// op(A') function: GB_tran__abs_uint32_int16

// C type:   uint32_t
// A type:   int16_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    uint32_t z = (uint32_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT32 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_uint32_int16
(
    uint32_t *Cx,       // Cx and Ax may be aliased
    int16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_uint32_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__identity_fc32_uint64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__identity_fc32_uint64
// op(A') function: GB_unop_tran__identity_fc32_uint64

// C type:   GxB_FC32_t
// A type:   uint64_t
// cast:     GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop:  cij = aij

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)                               \
{                                                       \
    /* aij = Ax [pA] */                                 \
    uint64_t aij = Ax [pA] ;                            \
    /* Cx [pC] = op (cast (aij)) */                     \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;      \
    Cx [pC] = z ;                                       \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_fc32_uint64
(
    GxB_FC32_t *Cx,         // Cx and Ax may be aliased
    const uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint64_t aij = Ax [p] ;
        GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_fc32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_subassign_04.c
//------------------------------------------------------------------------------ // GB_subassign_04: C(I,J) += A ; using S //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Method 04: C(I,J) += A ; using S // M: NULL // Mask_comp: false // C_replace: false // accum: present // A: matrix // S: constructed // C: not bitmap: use GB_bitmap_assign instead // A: any sparsity structure. #include "GB_subassign_methods.h" GrB_Info GB_subassign_04 ( GrB_Matrix C, // input: const GrB_Index *I, const int64_t ni, const int64_t nI, const int Ikind, const int64_t Icolon [3], const GrB_Index *J, const int64_t nj, const int64_t nJ, const int Jkind, const int64_t Jcolon [3], const GrB_BinaryOp accum, const GrB_Matrix A, GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_aliased (C, A)) ; // NO ALIAS of C==A //-------------------------------------------------------------------------- // S = C(I,J) //-------------------------------------------------------------------------- GB_EMPTY_TASKLIST ; GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ; //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- GB_MATRIX_WAIT_IF_JUMBLED (A) ; GB_GET_C ; // C must not be bitmap GB_GET_A ; GB_GET_S ; GB_GET_ACCUM ; //-------------------------------------------------------------------------- // Method 04: C(I,J) += A ; using S //-------------------------------------------------------------------------- // Time: Close to Optimal. Every entry in A must be visited, and the // corresponding entry in S must then be found. Time for this phase is // Omega(nnz(A)), but S has already been constructed, in Omega(nnz(S)) // time. This method simply traverses all of A+S (like GB_add for // computing A+S), the same as Method 02. Time taken is O(nnz(A)+nnz(S)). // The only difference is that the traversal of A+S can terminate if A is // exhausted. Entries in S but not A do not actually require any work // (unlike Method 02, which must visit all entries in A+S). // Method 02 and Method 04 are somewhat similar. They differ on how C is // modified when the entry is present in S but not A. // TODO: phase2 of Method 02 and 04 are identical and could be // done in a single function. // Compare with Method 16, which computes C(I,J)<!M> += A, using S. 
//-------------------------------------------------------------------------- // Parallel: A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20) //-------------------------------------------------------------------------- if (A_is_bitmap) { // all of IxJ must be examined GB_SUBASSIGN_IXJ_SLICE ; } else { // traverse all A+S GB_SUBASSIGN_TWO_SLICE (A, S) ; } //-------------------------------------------------------------------------- // phase 1: create zombies, update entries, and count pending tuples //-------------------------------------------------------------------------- if (A_is_bitmap) { //---------------------------------------------------------------------- // phase1: A is bitmap //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:nzombies) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t j = kfirst ; j <= klast ; j++) { //-------------------------------------------------------------- // get S(iA_start:iA_end,j) //-------------------------------------------------------------- GB_GET_VECTOR_FOR_IXJ (S, iA_start) ; int64_t pA_start = j * Avlen ; //-------------------------------------------------------------- // do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j) //-------------------------------------------------------------- for (int64_t iA = iA_start ; iA < iA_end ; iA++) { int64_t pA = pA_start + iA ; bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ; bool Afound = Ab [pA] ; if (Sfound && !Afound) { // ----[C . 1] or [X . 1]------------------------------- // S (i,j) is present but A (i,j) is not // [C . 1]: action: ( C ): no change, with accum // [X . 1]: action: ( X ): still a zombie GB_NEXT (S) ; } else if (!Sfound && Afound) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. 
A 1]: action: ( insert ) task_pending++ ; } else if (Sfound && Afound) { // ----[C A 1] or [X A 1]------------------------------- // both S (i,j) and A (i,j) present // [C A 1]: action: ( =C+A ): apply accum // [X A 1]: action: ( undelete ): zombie lives GB_C_S_LOOKUP ; GB_withaccum_C_A_1_matrix ; GB_NEXT (S) ; } } } GB_PHASE1_TASK_WRAPUP ; } } else { //---------------------------------------------------------------------- // phase1: A is hypersparse, sparse, or full //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:nzombies) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_TASK_DESCRIPTOR_PHASE1 ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t k = kfirst ; k <= klast ; k++) { //-------------------------------------------------------------- // get A(:,j) and S(:,j) //-------------------------------------------------------------- int64_t j = GBH (Zh, k) ; GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen); GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen); //-------------------------------------------------------------- // do a 2-way merge of S(:,j) and A(:,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression // int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; // while both list S (:,j) and A (:,j) have entries while (pS < pS_end && pA < pA_end) { int64_t iS = GBI (Si, pS, Svlen) ; int64_t iA = GBI (Ai, pA, Avlen) ; if (iS < iA) { // ----[C . 1] or [X . 1]------------------------------- // S (i,j) is present but A (i,j) is not // [C . 1]: action: ( C ): no change, with accum // [X . 1]: action: ( X ): still a zombie GB_NEXT (S) ; } else if (iA < iS) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) task_pending++ ; GB_NEXT (A) ; } else { // ----[C A 1] or [X A 1]------------------------------- // both S (i,j) and A (i,j) present // [C A 1]: action: ( =C+A ): apply accum // [X A 1]: action: ( undelete ): zombie lives GB_C_S_LOOKUP ; GB_withaccum_C_A_1_matrix ; GB_NEXT (S) ; GB_NEXT (A) ; } } // ignore the remainder of S (:,j) // List A (:,j) has entries. List S (:,j) exhausted. 
task_pending += (pA_end - pA) ; } GB_PHASE1_TASK_WRAPUP ; } } //-------------------------------------------------------------------------- // phase 2: insert pending tuples //-------------------------------------------------------------------------- GB_PENDING_CUMSUM ; if (A_is_bitmap) { //---------------------------------------------------------------------- // phase2: A is bitmap //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(&&:pending_sorted) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t j = kfirst ; j <= klast ; j++) { //-------------------------------------------------------------- // get S(iA_start:iA_end,j) //-------------------------------------------------------------- GB_GET_VECTOR_FOR_IXJ (S, iA_start) ; int64_t pA_start = j * Avlen ; //-------------------------------------------------------------- // do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; for (int64_t iA = iA_start ; iA < iA_end ; iA++) { int64_t pA = pA_start + iA ; bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ; bool Afound = Ab [pA] ; if (!Sfound && Afound) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. 
A 1]: action: ( insert ) int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT_aij ; GB_NEXT (A) ; } else if (Sfound) { // S (i,j) present GB_NEXT (S) ; } } } GB_PHASE2_TASK_WRAPUP ; } } else { //---------------------------------------------------------------------- // phase2: A is hypersparse, sparse, or full //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(&&:pending_sorted) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_TASK_DESCRIPTOR_PHASE2 ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t k = kfirst ; k <= klast ; k++) { //-------------------------------------------------------------- // get A(:,j) and S(:,j) //-------------------------------------------------------------- int64_t j = GBH (Zh, k) ; GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen); GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen); //-------------------------------------------------------------- // do a 2-way merge of S(:,j) and A(:,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; // while both list S (:,j) and A (:,j) have entries while (pS < pS_end && pA < pA_end) { int64_t iS = GBI (Si, pS, Svlen) ; int64_t iA = GBI (Ai, pA, Avlen) ; if (iS < iA) { GB_NEXT (S) ; } else if (iA < iS) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT_aij ; GB_NEXT (A) ; } else { GB_NEXT (S) ; GB_NEXT (A) ; } } // ignore the remainder of S (:,j) // while list A (:,j) has entries. List S (:,j) exhausted. while (pA < pA_end) { // ----[. A 1]---------------------------------------------- // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) int64_t iA = GBI (Ai, pA, Avlen) ; int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT_aij ; GB_NEXT (A) ; } } GB_PHASE2_TASK_WRAPUP ; } } //-------------------------------------------------------------------------- // finalize the matrix and return result //-------------------------------------------------------------------------- GB_SUBASSIGN_WRAPUP ; }
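/*
 * A minimal sketch of the 2-way merge that drives both phases above: the
 * sorted index lists S(:,j) and A(:,j) are walked with two pointers, and
 * every index falls into one of the three action classes commented in the
 * loops ([C . 1]/[X . 1]: S only, [. A 1]: A only, [C A 1]/[X A 1]: both).
 * The function below is illustrative only; it is not a GraphBLAS API and
 * strips out all of the GB_* macros.
 */
#include <stdint.h>

static void merge_classify
(
    const int64_t *Si, int64_t Slen,    // sorted indices of S(:,j)
    const int64_t *Ai, int64_t Alen,    // sorted indices of A(:,j)
    int64_t *n_both,                    // entries present in both S and A
    int64_t *n_only_S,                  // entries present in S only
    int64_t *n_only_A                   // entries present in A only (pending inserts)
)
{
    int64_t pS = 0, pA = 0 ;
    *n_both = *n_only_S = *n_only_A = 0 ;
    while (pS < Slen && pA < Alen)
    {
        int64_t iS = Si [pS], iA = Ai [pA] ;
        if (iS < iA)      { (*n_only_S)++ ; pS++ ; }          // [C . 1] or [X . 1]
        else if (iA < iS) { (*n_only_A)++ ; pA++ ; }          // [. A 1]
        else              { (*n_both)++ ; pS++ ; pA++ ; }     // [C A 1] or [X A 1]
    }
    *n_only_S += (Slen - pS) ;  // remainder of S(:,j): ignored, as in phase 1 above
    *n_only_A += (Alen - pA) ;  // remainder of A(:,j): all become pending tuples
}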
rdTest1.c
int main() { int i = 0; int w; int wPrime = 10; int u, q; #pragma omp parallel { while (1) { i++; #pragma omp single { w = wPrime; u = 11; } if (i >= 100) { break; } u = wPrime + 1; q = w + 1; wPrime = w; } wPrime = w + 1; } }
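/*
 * rdTest1.c exercises "#pragma omp single" inside a loop in a parallel
 * region. A minimal, self-contained sketch of the single construct's
 * semantics (one thread executes the block; the implicit barrier at its end
 * makes the write visible to the whole team); the variables here are
 * illustrative and unrelated to the test above.
 */
#include <stdio.h>
#include <omp.h>

int main(void)
{
    int w = 0;
    #pragma omp parallel shared(w)
    {
        #pragma omp single
        { w = 42; }     // exactly one thread executes this block
        // implicit barrier here: every thread now observes w == 42
        printf("thread %d sees w = %d\n", omp_get_thread_num(), w);
    }
    return 0;
}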
GB_binop__isne_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isne_int64) // A.*B function (eWiseMult): GB (_AemultB_08__isne_int64) // A.*B function (eWiseMult): GB (_AemultB_02__isne_int64) // A.*B function (eWiseMult): GB (_AemultB_04__isne_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_int64) // A*D function (colscale): GB (_AxD__isne_int64) // D*A function (rowscale): GB (_DxB__isne_int64) // C+=B function (dense accum): GB (_Cdense_accumB__isne_int64) // C+=b function (dense accum): GB (_Cdense_accumb__isne_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_int64) // C=scalar+B GB (_bind1st__isne_int64) // C=scalar+B' GB (_bind1st_tran__isne_int64) // C=A+scalar GB (_bind2nd__isne_int64) // C=A'+scalar GB (_bind2nd_tran__isne_int64) // C type: int64_t // A type: int64_t // A pattern? 0 // B type: int64_t // B pattern? 0 // BinaryOp: cij = (aij != bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x != y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNE || GxB_NO_INT64 || GxB_NO_ISNE_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isne_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isne_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isne_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isne_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isne_int64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isne_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int64_t alpha_scalar ; int64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int64_t *) alpha_scalar_in)) ; beta_scalar 
= (*((int64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isne_int64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isne_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isne_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isne_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isne_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = GBX (Bx, p, false) ; Cx [p] = (x != bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isne_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij != y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x != aij) ; \ } GrB_Info GB (_bind1st_tran__isne_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int 
nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij != y) ; \ } GrB_Info GB (_bind2nd_tran__isne_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
DRB021-reductionmissing-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* A kernel with two level parallelizable loop with reduction: if reduction(+:sum) is missing, there is race condition. Data race pairs: we allow multiple pairs to preserve the pattern. sum@70:7 vs. sum@70:7 sum@70:7 vs. sum@70:13 */ #include <stdio.h> int main(int argc, char* argv[]) { int i,j; float temp, sum=0.0; int len=100; float u[100][100]; #pragma omp parallel for private(i, j) for (i = 0; i < len; i++) #pragma omp parallel for private(j) for (j = 0; j < len; j++) u[i][j] = 0.5; #pragma omp parallel for private (temp, j) reduction(+:sum) collapse(2) for (i = 0; i < len; i++) for (j = 0; j < len; j++) { temp = u[i][j]; sum = sum + temp * temp; } printf ("sum = %f\n", sum); return 0; }
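/*
 * The header above documents the race that occurs when reduction(+:sum) is
 * absent from the accumulation loop: multiple threads read and update the
 * shared sum concurrently. A minimal sketch of the racy form and of the
 * reduction-based fix, detached from the benchmark harness (array size and
 * names are illustrative):
 */
#include <stdio.h>

int main(void)
{
    float sum = 0.0f, a[100];
    for (int i = 0; i < 100; i++) a[i] = 0.5f;

    /* Racy form: unsynchronized read-modify-write of the shared sum.
    #pragma omp parallel for
    for (int i = 0; i < 100; i++) sum = sum + a[i] * a[i];
    */

    /* Race-free form: each thread accumulates a private partial sum that is
       combined once at the end of the loop. */
    #pragma omp parallel for reduction(+:sum)
    for (int i = 0; i < 100; i++)
        sum = sum + a[i] * a[i];

    printf("sum = %f\n", sum);
    return 0;
}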
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_GENERAL_MATRIX_MATRIX_H #define EIGEN_GENERAL_MATRIX_MATRIX_H namespace Eigen { namespace internal { template<typename _LhsScalar, typename _RhsScalar> class level3_blocking; /* Specialization for a row-major destination A => simple transposition of the product */ template< typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs> struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor> { typedef gebp_traits<RhsScalar,LhsScalar> Traits; typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; static EIGEN_STRONG_INLINE void run( Index rows, Index cols, Index depth, const LhsScalar* lhs, Index lhsStride, const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar alpha, level3_blocking<RhsScalar,LhsScalar>& blocking, GemmParallelInfo<Index>* info = 0) { // transpose the product such that the result is column major general_matrix_matrix_product<Index, RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs, LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs, ColMajor> ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info); } }; /* Specialization for a col-major destination A * => Blocking algorithm following Goto's paper */ template< typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs> struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor> { typedef gebp_traits<LhsScalar,RhsScalar> Traits; typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; static void run(Index rows, Index cols, Index depth, const LhsScalar* _lhs, Index lhsStride, const RhsScalar* _rhs, Index rhsStride, ResScalar* _res, Index resStride, ResScalar alpha, level3_blocking<LhsScalar,RhsScalar>& blocking, GemmParallelInfo<Index>* info = 0) { typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper; typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper; typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper; LhsMapper lhs(_lhs,lhsStride); RhsMapper rhs(_rhs,rhsStride); ResMapper res(_res, resStride); Index kc = blocking.kc(); // cache block size along the K direction Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction Index nc = (std::min)(cols,blocking.nc()); // cache block size along the N direction gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs; gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs; gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp; #ifdef EIGEN_HAS_OPENMP if(info) { // this is the parallel version! 
int tid = omp_get_thread_num(); int threads = omp_get_num_threads(); LhsScalar* blockA = blocking.blockA(); eigen_internal_assert(blockA!=0); std::size_t sizeB = kc*nc; ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0); // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs... for(Index k=0; k<depth; k+=kc) { const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A' // In order to reduce the chance that a thread has to wait for the other, // let's start by packing B'. pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc); // Pack A_k to A' in a parallel fashion: // each thread packs the sub block A_k,i to A'_i where i is the thread id. // However, before copying to A'_i, we have to make sure that no other thread is still using it, // i.e., we test that info[tid].users equals 0. // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it. while(info[tid].users!=0) {} info[tid].users += threads; pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length); // Notify the other threads that the part A'_i is ready to go. info[tid].sync = k; // Computes C_i += A' * B' per A'_i for(int shift=0; shift<threads; ++shift) { int i = (tid+shift)%threads; // At this point we have to make sure that A'_i has been updated by the thread i, // we use testAndSetOrdered to mimic a volatile access. // However, no need to wait for the B' part which has been updated by the current thread! if (shift>0) { while(info[i].sync!=k) { } } gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha); } // Then keep going as usual with the remaining B' for(Index j=nc; j<cols; j+=nc) { const Index actual_nc = (std::min)(j+nc,cols)-j; // pack B_k,j to B' pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc); // C_j += A' * B' gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha); } // Release all the sub blocks A'_i of A' for the current thread, // i.e., we simply decrement the number of users by 1 for(Index i=0; i<threads; ++i) #pragma omp atomic info[i].users -= 1; } } else #endif // EIGEN_HAS_OPENMP { EIGEN_UNUSED_VARIABLE(info); // this is the sequential version! std::size_t sizeA = kc*mc; std::size_t sizeB = kc*nc; ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA()); ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB()); const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols; // For each horizontal panel of the rhs, and corresponding panel of the lhs... for(Index i2=0; i2<rows; i2+=mc) { const Index actual_mc = (std::min)(i2+mc,rows)-i2; for(Index k2=0; k2<depth; k2+=kc) { const Index actual_kc = (std::min)(k2+kc,depth)-k2; // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs. // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching) // Note that this panel will be read as many times as the number of blocks in the rhs's // horizontal panel which is, in practice, a very low number. pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc); // For each kc x nc block of the rhs's horizontal panel... 
for(Index j2=0; j2<cols; j2+=nc) { const Index actual_nc = (std::min)(j2+nc,cols)-j2; // We pack the rhs's block into a sequential chunk of memory (L2 caching) // Note that this block will be read a very high number of times, which is equal to the number of // micro horizontal panel of the large rhs's panel (e.g., rows/12 times). if((!pack_rhs_once) || i2==0) pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc); // Everything is packed, we can now call the panel * block kernel: gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha); } } } } } }; /********************************************************************************* * Specialization of generic_product_impl for "large" GEMM, i.e., * implementation of the high level wrapper to general_matrix_matrix_product **********************************************************************************/ template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType> struct gemm_functor { gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking) : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking) {} void initParallelSession(Index num_threads) const { m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads); m_blocking.allocateA(); } void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const { if(cols==-1) cols = m_rhs.cols(); Gemm::run(rows, cols, m_lhs.cols(), &m_lhs.coeffRef(row,0), m_lhs.outerStride(), &m_rhs.coeffRef(0,col), m_rhs.outerStride(), (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(), m_actualAlpha, m_blocking, info); } typedef typename Gemm::Traits Traits; protected: const Lhs& m_lhs; const Rhs& m_rhs; Dest& m_dest; Scalar m_actualAlpha; BlockingType& m_blocking; }; template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1, bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space; template<typename _LhsScalar, typename _RhsScalar> class level3_blocking { typedef _LhsScalar LhsScalar; typedef _RhsScalar RhsScalar; protected: LhsScalar* m_blockA; RhsScalar* m_blockB; Index m_mc; Index m_nc; Index m_kc; public: level3_blocking() : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0) {} inline Index mc() const { return m_mc; } inline Index nc() const { return m_nc; } inline Index kc() const { return m_kc; } inline LhsScalar* blockA() { return m_blockA; } inline RhsScalar* blockB() { return m_blockB; } }; template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor> class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */> : public level3_blocking< typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type, typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type> { enum { Transpose = StorageOrder==RowMajor, ActualRows = Transpose ? MaxCols : MaxRows, ActualCols = Transpose ? 
MaxRows : MaxCols }; typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar; typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar; typedef gebp_traits<LhsScalar,RhsScalar> Traits; enum { SizeA = ActualRows * MaxDepth, SizeB = ActualCols * MaxDepth }; #if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA]; EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB]; #else EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1]; EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1]; #endif public: gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/) { this->m_mc = ActualRows; this->m_nc = ActualCols; this->m_kc = MaxDepth; #if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES this->m_blockA = m_staticA; this->m_blockB = m_staticB; #else this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1)); this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1)); #endif } void initParallel(Index, Index, Index, Index) {} inline void allocateA() {} inline void allocateB() {} inline void allocateAll() {} }; template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor> class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false> : public level3_blocking< typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type, typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type> { enum { Transpose = StorageOrder==RowMajor }; typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar; typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar; typedef gebp_traits<LhsScalar,RhsScalar> Traits; Index m_sizeA; Index m_sizeB; public: gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking) { this->m_mc = Transpose ? cols : rows; this->m_nc = Transpose ? rows : cols; this->m_kc = depth; if(l3_blocking) { computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads); } else // no l3 blocking { Index n = this->m_nc; computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads); } m_sizeA = this->m_mc * this->m_kc; m_sizeB = this->m_kc * this->m_nc; } void initParallel(Index rows, Index cols, Index depth, Index num_threads) { this->m_mc = Transpose ? cols : rows; this->m_nc = Transpose ? 
rows : cols; this->m_kc = depth; eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0); Index m = this->m_mc; computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads); m_sizeA = this->m_mc * this->m_kc; m_sizeB = this->m_kc * this->m_nc; } void allocateA() { if(this->m_blockA==0) this->m_blockA = aligned_new<LhsScalar>(m_sizeA); } void allocateB() { if(this->m_blockB==0) this->m_blockB = aligned_new<RhsScalar>(m_sizeB); } void allocateAll() { allocateA(); allocateB(); } ~gemm_blocking_space() { aligned_delete(this->m_blockA, m_sizeA); aligned_delete(this->m_blockB, m_sizeB); } }; } // end namespace internal namespace internal { template<typename Lhs, typename Rhs> struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> > { typedef typename Product<Lhs,Rhs>::Scalar Scalar; typedef typename Lhs::Scalar LhsScalar; typedef typename Rhs::Scalar RhsScalar; typedef internal::blas_traits<Lhs> LhsBlasTraits; typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned; typedef internal::blas_traits<Rhs> RhsBlasTraits; typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType; typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned; enum { MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime) }; typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct; template<typename Dst> static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { // See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=404 for a discussion and helper program // to determine the following heuristic. // EIGEN_GEMM_TO_COEFFBASED_THRESHOLD is typically defined to 20 in GeneralProduct.h, // unless it has been specialized by the user or for a given architecture. // Note that the condition rhs.rows()>0 was required because lazy produc is (was?) not happy with empty inputs. // I'm not sure it is still required. if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0) lazyproduct::evalTo(dst, lhs, rhs); else { dst.setZero(); scaleAndAddTo(dst, lhs, rhs, Scalar(1)); } } template<typename Dst> static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0) lazyproduct::addTo(dst, lhs, rhs); else scaleAndAddTo(dst,lhs, rhs, Scalar(1)); } template<typename Dst> static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0) lazyproduct::subTo(dst, lhs, rhs); else scaleAndAddTo(dst, lhs, rhs, Scalar(-1)); } template<typename Dest> static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha) { eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols()); if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0) return; typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs); typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs); Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs) * RhsBlasTraits::extractScalarFactor(a_rhs); typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? 
RowMajor : ColMajor,LhsScalar,RhsScalar, Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType; typedef internal::gemm_functor< Scalar, Index, internal::general_matrix_matrix_product< Index, LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate), RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate), (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>, ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor; BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true); internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)> (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit); } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_GENERAL_MATRIX_MATRIX_H
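/*
 * A minimal sketch of the mc/kc/nc cache blocking that the sequential path
 * above follows (Goto-style blocking), without the packing buffers, the
 * gebp micro-kernel, or the OpenMP panel handshake. Column-major storage is
 * assumed; the function and block sizes are illustrative, not Eigen API.
 */
#include <stddef.h>

static void gemm_blocked
(
    int rows, int cols, int depth,
    const double *A,        // rows x depth, leading dimension rows
    const double *B,        // depth x cols, leading dimension depth
    double *C,              // rows x cols, leading dimension rows
    double alpha,
    int mc, int kc, int nc  // cache block sizes along M, K, N
)
{
    for (int i2 = 0; i2 < rows; i2 += mc) {
        int m = (i2 + mc < rows) ? mc : rows - i2;
        for (int k2 = 0; k2 < depth; k2 += kc) {        // pack_lhs would copy A(i2:.,k2:.) here
            int k = (k2 + kc < depth) ? kc : depth - k2;
            for (int j2 = 0; j2 < cols; j2 += nc) {     // pack_rhs would copy B(k2:.,j2:.) here
                int n = (j2 + nc < cols) ? nc : cols - j2;
                for (int j = 0; j < n; j++)             // the gebp kernel replaces this loop nest
                    for (int p = 0; p < k; p++)
                        for (int i = 0; i < m; i++)
                            C[(i2 + i) + (size_t)(j2 + j) * rows] +=
                                alpha * A[(i2 + i) + (size_t)(k2 + p) * rows]
                                      * B[(k2 + p) + (size_t)(j2 + j) * depth];
            }
        }
    }
}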
atomic.c
////////////////////////////////////////////////////////////// // // atomic.c // // Copyright (c) 2017, Hassan Salehe Matar // All rights reserved. // // This file is part of Clanomp. For details, see // https://github.com/hassansalehe/Clanomp. Please also // see the LICENSE file for additional BSD notice // // Redistribution and use in source and binary forms, with // or without modification, are permitted provided that // the following conditions are met: // // * Redistributions of source code must retain the above // copyright notice, this list of conditions and the // following disclaimer. // // * Redistributions in binary form must reproduce the // above copyright notice, this list of conditions and // the following disclaimer in the documentation and/or // other materials provided with the distribution. // // * Neither the name of the copyright holder nor the names // of its contributors may be used to endorse or promote // products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND // CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF // SUCH DAMAGE. // ////////////////////////////////////////////////////////////// // From the OpenMP specification: // * "atomic" ensures specific memory location is accessed // atomically // // * #pragma omp atomic (read|write|update|capture) // expression <statement|block> // // References: // 1. http://www.openmp.org/wp-content/uploads/openmp-examples-4.5.0.pdf // 2. http://www.openmp.org/wp-content/uploads/openmp-4.5.pdf #include <stdio.h> #include <omp.h> int main() { int count = 0; #pragma omp parallel shared(count) { #pragma omp atomic count++; } printf("Value of count: %d, construct: <atomic>\n", count); return 0; }
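/*
 * atomic.c above uses the default update form of the construct. A minimal
 * sketch of the four clause forms named in its header comment
 * (read | write | update | capture); x, v, and n are illustrative variables,
 * not part of the original program, and the final value of x depends on the
 * interleaving of threads.
 */
#include <stdio.h>
#include <omp.h>

int main(void)
{
    int x = 0, n = 10;
    #pragma omp parallel shared(x, n)
    {
        int v;

        #pragma omp atomic update
        x += 1;                  // atomic read-modify-write (same as plain "atomic")

        #pragma omp atomic read
        v = x;                   // atomic load into a private copy

        #pragma omp atomic write
        x = n;                   // atomic store

        #pragma omp atomic capture
        { v = x; x += 1; }       // fetch the old value and update in one step

        (void) v;                // silence unused-variable warnings
    }
    printf("x = %d, construct: <atomic forms>\n", x);
    return 0;
}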
bml_adjungate_triangle_ellpack_typed.c
#include "../../macros.h" #include "../../typed.h" #include "../bml_introspection.h" #include "../bml_logger.h" #include "../bml_types.h" #include "bml_adjungate_triangle_ellpack.h" #include "bml_types_ellpack.h" #include <stdio.h> #include <stdlib.h> #include <complex.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #endif /** Adjungates a triangle of a matrix in place. * * \ingroup adjungate_triangle_group * * \param A[in,out] The matrix for which the triangle should be adjungated * \param triangle[out] Which triangle to adjungate ('u': upper, 'l': lower) * * WARNING: Please verify race conditions and parallel performances. */ void TYPED_FUNC( bml_adjungate_triangle_ellpack) ( bml_matrix_ellpack_t * A, char *triangle) { int A_N = A->N; int A_M = A->M; int l; int ll; int j; REAL_T *A_value = (REAL_T *) A->value; int *A_index = A->index; int *A_nnz = A->nnz; #ifdef _OPENMP omp_lock_t lock[A_M]; #endif switch (*triangle) { case 'u': #ifdef _OPENMP for (int i = 0; i < A_M; i++) omp_init_lock(&(lock[i])); #endif #if defined(USE_OMP_OFFLOAD) #pragma omp target update from(A_value[:A_N*A_M], A_index[:A_N*A_M], A_nnz[:A_N]) #endif #pragma omp parallel for \ shared(A_N, A_M, A_index, A_nnz, A_value, lock) \ private(l, ll) // WARNING: Please, check for race conditions ... for (int i = 0; i < A_N; i++) // For every row { l = A_nnz[i]; for (int j = 0; j < l; j++) // We search for indices gt 0. { ll = A_index[ROWMAJOR(i, j, A_N, A_M)]; if (ll > 0) { if (ll > i) { #ifdef _OPENMP omp_set_lock(&(lock[ll])); #endif A_index[ROWMAJOR(ll, A_nnz[ll], A_N, A_M)] = i; A_value[ROWMAJOR(ll, A_nnz[ll], A_N, A_M)] = conj(A_value[ROWMAJOR(i, j, A_N, A_M)]); A_nnz[ll]++; #ifdef _OPENMP omp_unset_lock(&(lock[ll])); #endif } } } } #ifdef _OPENMP for (int i = 0; i < A_M; i++) omp_destroy_lock(&(lock[i])); #endif break; case 'l': #ifdef _OPENMP for (int i = 0; i < A_M; i++) omp_init_lock(&(lock[i])); #endif #pragma omp parallel for \ shared(lock, A_N, A_M, A_index, A_nnz, A_value) \ private(l, ll) // WARNING: Please, check for race conditions and parallel performances ... for (int i = 0; i < A_N; i++) { l = A_nnz[i]; for (int j = 0; j < l; j++) { ll = A_index[ROWMAJOR(i, j, A_N, A_M)]; if (ll >= 0) { if (ll < i) { #ifdef _OPENMP omp_set_lock(&(lock[ll])); #endif A_index[ROWMAJOR(ll, A_nnz[ll], A_N, A_M)] = i; A_value[ROWMAJOR(ll, A_nnz[ll], A_N, A_M)] = conj(A_value[ROWMAJOR(i, j, A_N, A_M)]); A_nnz[ll]++; #ifdef _OPENMP omp_unset_lock(&(lock[ll])); #endif } } } } #ifdef _OPENMP for (int i = 0; i < A_M; i++) omp_destroy_lock(&(lock[i])); #endif break; default: LOG_ERROR("unknown triangle %c\n", triangle); break; } #if defined(USE_OMP_OFFLOAD) #pragma omp target update to(A_value[:A_N*A_M], A_index[:A_N*A_M], A_nnz[:A_N]) #endif }
GB_unaryop__ainv_int16_uint32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_int16_uint32 // op(A') function: GB_tran__ainv_int16_uint32 // C type: int16_t // A type: uint32_t // cast: int16_t cij = (int16_t) aij // unaryop: cij = -aij #define GB_ATYPE \ uint32_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, aij) \ int16_t z = (int16_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_int16_uint32 ( int16_t *Cx, // Cx and Ax may be aliased uint32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_int16_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
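/*
 * Expanding the GB_GETA / GB_CASTING / GB_OP macros above by hand shows what
 * the apply kernel GB_unop__ainv_int16_uint32 reduces to: read a uint32_t,
 * cast it to int16_t, and negate. The standalone function below is a sketch
 * of that expansion, not a GraphBLAS entry point.
 */
#include <stdint.h>

void unop_ainv_int16_uint32_expanded
(
    int16_t *Cx,            // output array
    const uint32_t *Ax,     // input array
    int64_t anz,            // number of entries
    int nthreads
)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint32_t aij = Ax [p] ;              /* GB_GETA    */
        int16_t  z   = (int16_t) aij ;       /* GB_CASTING */
        Cx [p] = (int16_t) (-z) ;            /* GB_OP: z = -x */
    }
}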
statistic.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC % % SS T A A T I SS T I C % % SSS T AAAAA T I SSS T I C % % SS T A A T I SS T I C % % SSSSS T A A T IIIII SSSSS T IIIII CCCC % % % % % % MagickCore Image Statistical Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/accelerate.h" #include "magick/animate.h" #include "magick/animate.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/compress.h" #include "magick/constitute.h" #include "magick/deprecate.h" #include "magick/display.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/list.h" #include "magick/image-private.h" #include "magick/magic.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-private.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/semaphore.h" #include "magick/signature-private.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/timer.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E v a l u a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EvaluateImage() applies a value to the image with an arithmetic, relational, % or logical operator to an image. Use these operations to lighten or darken % an image, to increase or decrease contrast in an image, or to produce the % "negative" of an image. 
% % The format of the EvaluateImageChannel method is: % % MagickBooleanType EvaluateImage(Image *image, % const MagickEvaluateOperator op,const double value, % ExceptionInfo *exception) % MagickBooleanType EvaluateImages(Image *images, % const MagickEvaluateOperator op,const double value, % ExceptionInfo *exception) % MagickBooleanType EvaluateImageChannel(Image *image, % const ChannelType channel,const MagickEvaluateOperator op, % const double value,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o op: A channel op. % % o value: A value value. % % o exception: return any errors or warnings in this structure. % */ static MagickPixelPacket **DestroyPixelThreadSet(MagickPixelPacket **pixels) { register ssize_t i; assert(pixels != (MagickPixelPacket **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (MagickPixelPacket *) NULL) pixels[i]=(MagickPixelPacket *) RelinquishMagickMemory(pixels[i]); pixels=(MagickPixelPacket **) RelinquishMagickMemory(pixels); return(pixels); } static MagickPixelPacket **AcquirePixelThreadSet(const Image *image, const size_t number_images) { register ssize_t i, j; MagickPixelPacket **pixels; size_t length, number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(MagickPixelPacket **) AcquireQuantumMemory(number_threads, sizeof(*pixels)); if (pixels == (MagickPixelPacket **) NULL) return((MagickPixelPacket **) NULL); (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels)); for (i=0; i < (ssize_t) number_threads; i++) { length=image->columns; if (length < number_images) length=number_images; pixels[i]=(MagickPixelPacket *) AcquireQuantumMemory(length, sizeof(**pixels)); if (pixels[i] == (MagickPixelPacket *) NULL) return(DestroyPixelThreadSet(pixels)); for (j=0; j < (ssize_t) length; j++) GetMagickPixelPacket(image,&pixels[i][j]); } return(pixels); } static inline double EvaluateMax(const double x,const double y) { if (x > y) return(x); return(y); } #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { const MagickPixelPacket *color_1, *color_2; int intensity; color_1=(const MagickPixelPacket *) x; color_2=(const MagickPixelPacket *) y; intensity=(int) MagickPixelIntensity(color_2)- (int) MagickPixelIntensity(color_1); return(intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static inline double MagickMin(const double x,const double y) { if (x < y) return(x); return(y); } static MagickRealType ApplyEvaluateOperator(RandomInfo *random_info, const Quantum pixel,const MagickEvaluateOperator op, const MagickRealType value) { MagickRealType result; result=0.0; switch (op) { case UndefinedEvaluateOperator: break; case AbsEvaluateOperator: { result=(MagickRealType) fabs((double) (pixel+value)); break; } case AddEvaluateOperator: { result=(MagickRealType) (pixel+value); break; } case AddModulusEvaluateOperator: { /* This returns a 'floored modulus' of the addition which is a positive result. It differs from % or fmod() which returns a 'truncated modulus' result, where floor() is replaced by trunc() and could return a negative result (which is clipped). 
*/ result=pixel+value; result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0)); break; } case AndEvaluateOperator: { result=(MagickRealType) ((size_t) pixel & (size_t) (value+0.5)); break; } case CosineEvaluateOperator: { result=(MagickRealType) (QuantumRange*(0.5*cos((double) (2.0*MagickPI* QuantumScale*pixel*value))+0.5)); break; } case DivideEvaluateOperator: { result=pixel/(value == 0.0 ? 1.0 : value); break; } case ExponentialEvaluateOperator: { result=(MagickRealType) (QuantumRange*exp((double) (value*QuantumScale* pixel))); break; } case GaussianNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, GaussianNoise,value); break; } case ImpulseNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, ImpulseNoise,value); break; } case LaplacianNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, LaplacianNoise,value); break; } case LeftShiftEvaluateOperator: { result=(MagickRealType) ((size_t) pixel << (size_t) (value+0.5)); break; } case LogEvaluateOperator: { if ((QuantumScale*pixel) >= MagickEpsilon) result=(MagickRealType) (QuantumRange*log((double) (QuantumScale*value* pixel+1.0))/log((double) (value+1.0))); break; } case MaxEvaluateOperator: { result=(MagickRealType) EvaluateMax((double) pixel,value); break; } case MeanEvaluateOperator: { result=(MagickRealType) (pixel+value); break; } case MedianEvaluateOperator: { result=(MagickRealType) (pixel+value); break; } case MinEvaluateOperator: { result=(MagickRealType) MagickMin((double) pixel,value); break; } case MultiplicativeNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, MultiplicativeGaussianNoise,value); break; } case MultiplyEvaluateOperator: { result=(MagickRealType) (value*pixel); break; } case OrEvaluateOperator: { result=(MagickRealType) ((size_t) pixel | (size_t) (value+0.5)); break; } case PoissonNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, PoissonNoise,value); break; } case PowEvaluateOperator: { result=(MagickRealType) (QuantumRange*pow((double) (QuantumScale*pixel), (double) value)); break; } case RightShiftEvaluateOperator: { result=(MagickRealType) ((size_t) pixel >> (size_t) (value+0.5)); break; } case SetEvaluateOperator: { result=value; break; } case SineEvaluateOperator: { result=(MagickRealType) (QuantumRange*(0.5*sin((double) (2.0*MagickPI* QuantumScale*pixel*value))+0.5)); break; } case SubtractEvaluateOperator: { result=(MagickRealType) (pixel-value); break; } case SumEvaluateOperator: { result=(MagickRealType) (pixel+value); break; } case ThresholdEvaluateOperator: { result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 : QuantumRange); break; } case ThresholdBlackEvaluateOperator: { result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 : pixel); break; } case ThresholdWhiteEvaluateOperator: { result=(MagickRealType) (((MagickRealType) pixel > value) ? 
QuantumRange : pixel); break; } case UniformNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, UniformNoise,value); break; } case XorEvaluateOperator: { result=(MagickRealType) ((size_t) pixel ^ (size_t) (value+0.5)); break; } } return(result); } MagickExport MagickBooleanType EvaluateImage(Image *image, const MagickEvaluateOperator op,const double value,ExceptionInfo *exception) { MagickBooleanType status; status=EvaluateImageChannel(image,CompositeChannels,op,value,exception); return(status); } MagickExport Image *EvaluateImages(const Image *images, const MagickEvaluateOperator op,ExceptionInfo *exception) { #define EvaluateImageTag "Evaluate/Image" CacheView *evaluate_view; Image *image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket **restrict evaluate_pixels, zero; RandomInfo **restrict random_info; size_t number_images; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=CloneImage(images,images->columns,images->rows,MagickTrue,exception); if (image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(image,DirectClass) == MagickFalse) { InheritException(exception,&image->exception); image=DestroyImage(image); return((Image *) NULL); } number_images=GetImageListLength(images); evaluate_pixels=AcquirePixelThreadSet(images,number_images); if (evaluate_pixels == (MagickPixelPacket **) NULL) { image=DestroyImage(image); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return((Image *) NULL); } /* Evaluate image pixels. 
*/ status=MagickTrue; progress=0; GetMagickPixelPacket(images,&zero); random_info=AcquireRandomInfoThreadSet(); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #endif evaluate_view=AcquireAuthenticCacheView(image,exception); if (op == MedianEvaluateOperator) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,images,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { CacheView *image_view; const Image *next; const int id = GetOpenMPThreadId(); register IndexPacket *restrict evaluate_indexes; register MagickPixelPacket *evaluate_pixel; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(evaluate_view,0,y, image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view); evaluate_pixel=evaluate_pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) number_images; i++) evaluate_pixel[i]=zero; next=images; for (i=0; i < (ssize_t) number_images; i++) { register const IndexPacket *indexes; register const PixelPacket *p; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception); if (p == (const PixelPacket *) NULL) { image_view=DestroyCacheView(image_view); break; } indexes=GetCacheViewVirtualIndexQueue(image_view); evaluate_pixel[i].red=ApplyEvaluateOperator(random_info[id], GetPixelRed(p),op,evaluate_pixel[i].red); evaluate_pixel[i].green=ApplyEvaluateOperator(random_info[id], GetPixelGreen(p),op,evaluate_pixel[i].green); evaluate_pixel[i].blue=ApplyEvaluateOperator(random_info[id], GetPixelBlue(p),op,evaluate_pixel[i].blue); evaluate_pixel[i].opacity=ApplyEvaluateOperator(random_info[id], GetPixelAlpha(p),op,evaluate_pixel[i].opacity); if (image->colorspace == CMYKColorspace) evaluate_pixel[i].index=ApplyEvaluateOperator(random_info[id], *indexes,op,evaluate_pixel[i].index); image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel), IntensityCompare); SetPixelRed(q,ClampToQuantum(evaluate_pixel[i/2].red)); SetPixelGreen(q,ClampToQuantum(evaluate_pixel[i/2].green)); SetPixelBlue(q,ClampToQuantum(evaluate_pixel[i/2].blue)); SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[i/2].opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(evaluate_indexes+i,ClampToQuantum( evaluate_pixel[i/2].index)); q++; } if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EvaluateImages) #endif proceed=SetImageProgress(images,EvaluateImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } else { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,images,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { CacheView *image_view; const Image *next; const int id = GetOpenMPThreadId(); register IndexPacket *restrict evaluate_indexes; register ssize_t i, x; register MagickPixelPacket *evaluate_pixel; register PixelPacket *restrict q; if (status == MagickFalse) continue; 
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y, image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view); evaluate_pixel=evaluate_pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) evaluate_pixel[x]=zero; next=images; for (i=0; i < (ssize_t) number_images; i++) { register const IndexPacket *indexes; register const PixelPacket *p; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) { image_view=DestroyCacheView(image_view); break; } indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) next->columns; x++) { evaluate_pixel[x].red=ApplyEvaluateOperator(random_info[id], GetPixelRed(p),i == 0 ? AddEvaluateOperator : op, evaluate_pixel[x].red); evaluate_pixel[x].green=ApplyEvaluateOperator(random_info[id], GetPixelGreen(p),i == 0 ? AddEvaluateOperator : op, evaluate_pixel[x].green); evaluate_pixel[x].blue=ApplyEvaluateOperator(random_info[id], GetPixelBlue(p),i == 0 ? AddEvaluateOperator : op, evaluate_pixel[x].blue); evaluate_pixel[x].opacity=ApplyEvaluateOperator(random_info[id], GetPixelAlpha(p),i == 0 ? AddEvaluateOperator : op, evaluate_pixel[x].opacity); if (image->colorspace == CMYKColorspace) evaluate_pixel[x].index=ApplyEvaluateOperator(random_info[id], GetPixelIndex(indexes+x),i == 0 ? AddEvaluateOperator : op, evaluate_pixel[x].index); p++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (op == MeanEvaluateOperator) for (x=0; x < (ssize_t) image->columns; x++) { evaluate_pixel[x].red/=number_images; evaluate_pixel[x].green/=number_images; evaluate_pixel[x].blue/=number_images; evaluate_pixel[x].opacity/=number_images; evaluate_pixel[x].index/=number_images; } if (op == MultiplyEvaluateOperator) for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j=0; j < (ssize_t) (number_images-1); j++) { evaluate_pixel[x].red*=(MagickRealType) QuantumScale; evaluate_pixel[x].green*=(MagickRealType) QuantumScale; evaluate_pixel[x].blue*=(MagickRealType) QuantumScale; evaluate_pixel[x].opacity*=(MagickRealType) QuantumScale; evaluate_pixel[x].index*=(MagickRealType) QuantumScale; } } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ClampToQuantum(evaluate_pixel[x].red)); SetPixelGreen(q,ClampToQuantum(evaluate_pixel[x].green)); SetPixelBlue(q,ClampToQuantum(evaluate_pixel[x].blue)); SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[x].opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(evaluate_indexes+x,ClampToQuantum( evaluate_pixel[x].index)); q++; } if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EvaluateImages) #endif proceed=SetImageProgress(images,EvaluateImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } evaluate_view=DestroyCacheView(evaluate_view); evaluate_pixels=DestroyPixelThreadSet(evaluate_pixels); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) image=DestroyImage(image); return(image); } MagickExport MagickBooleanType EvaluateImageChannel(Image *image, const ChannelType channel,const MagickEvaluateOperator op,const double value, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; 
MagickOffsetType progress; RandomInfo **restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) { InheritException(exception,&image->exception); return(MagickFalse); } status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #endif image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(ApplyEvaluateOperator(random_info[id], GetPixelRed(q),op,value))); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(ApplyEvaluateOperator(random_info[id], GetPixelGreen(q),op,value))); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(ApplyEvaluateOperator(random_info[id], GetPixelBlue(q),op,value))); if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) SetPixelOpacity(q,ClampToQuantum(ApplyEvaluateOperator( random_info[id],GetPixelOpacity(q),op,value))); else SetPixelAlpha(q,ClampToQuantum(ApplyEvaluateOperator( random_info[id],(Quantum) GetPixelAlpha(q),op,value))); } if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL)) SetPixelIndex(indexes+x,ClampToQuantum(ApplyEvaluateOperator( random_info[id],GetPixelIndex(indexes+x),op,value))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EvaluateImageChannel) #endif proceed=SetImageProgress(image,EvaluateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F u n c t i o n I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FunctionImage() applies a value to the image with an arithmetic, relational, % or logical operator to an image. Use these operations to lighten or darken % an image, to increase or decrease contrast in an image, or to produce the % "negative" of an image. 
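%
%  For example, a contrast-shaping sinusoid applied to all channels (a minimal
%  sketch; the parameter order is frequency, phase, amplitude, bias, and the
%  values shown are illustrative only):
%
%    double parameters[4] = { 1.0, 0.0, 0.6, 0.5 };
%    (void) FunctionImageChannel(image,DefaultChannels,SinusoidFunction,4,
%      parameters,exception);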
% % The format of the FunctionImageChannel method is: % % MagickBooleanType FunctionImage(Image *image, % const MagickFunction function,const ssize_t number_parameters, % const double *parameters,ExceptionInfo *exception) % MagickBooleanType FunctionImageChannel(Image *image, % const ChannelType channel,const MagickFunction function, % const ssize_t number_parameters,const double *argument, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o function: A channel function. % % o parameters: one or more parameters. % % o exception: return any errors or warnings in this structure. % */ static Quantum ApplyFunction(Quantum pixel,const MagickFunction function, const size_t number_parameters,const double *parameters, ExceptionInfo *exception) { MagickRealType result; register ssize_t i; (void) exception; result=0.0; switch (function) { case PolynomialFunction: { /* * Polynomial * Parameters: polynomial constants, highest to lowest order * For example: c0*x^3 + c1*x^2 + c2*x + c3 */ result=0.0; for (i=0; i < (ssize_t) number_parameters; i++) result=result*QuantumScale*pixel + parameters[i]; result*=QuantumRange; break; } case SinusoidFunction: { /* Sinusoid Function * Parameters: Freq, Phase, Ampl, bias */ double freq,phase,ampl,bias; freq = ( number_parameters >= 1 ) ? parameters[0] : 1.0; phase = ( number_parameters >= 2 ) ? parameters[1] : 0.0; ampl = ( number_parameters >= 3 ) ? parameters[2] : 0.5; bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5; result=(MagickRealType) (QuantumRange*(ampl*sin((double) (2.0*MagickPI* (freq*QuantumScale*pixel + phase/360.0) )) + bias ) ); break; } case ArcsinFunction: { /* Arcsin Function (peged at range limits for invalid results) * Parameters: Width, Center, Range, Bias */ double width,range,center,bias; width = ( number_parameters >= 1 ) ? parameters[0] : 1.0; center = ( number_parameters >= 2 ) ? parameters[1] : 0.5; range = ( number_parameters >= 3 ) ? parameters[2] : 1.0; bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5; result = 2.0/width*(QuantumScale*pixel - center); if ( result <= -1.0 ) result = bias - range/2.0; else if ( result >= 1.0 ) result = bias + range/2.0; else result=(MagickRealType) (range/MagickPI*asin((double) result)+bias); result *= QuantumRange; break; } case ArctanFunction: { /* Arctan Function * Parameters: Slope, Center, Range, Bias */ double slope,range,center,bias; slope = ( number_parameters >= 1 ) ? parameters[0] : 1.0; center = ( number_parameters >= 2 ) ? parameters[1] : 0.5; range = ( number_parameters >= 3 ) ? parameters[2] : 1.0; bias = ( number_parameters >= 4 ) ? 
parameters[3] : 0.5; result=(MagickRealType) (MagickPI*slope*(QuantumScale*pixel-center)); result=(MagickRealType) (QuantumRange*(range/MagickPI*atan((double) result) + bias ) ); break; } case UndefinedFunction: break; } return(ClampToQuantum(result)); } MagickExport MagickBooleanType FunctionImage(Image *image, const MagickFunction function,const size_t number_parameters, const double *parameters,ExceptionInfo *exception) { MagickBooleanType status; status=FunctionImageChannel(image,CompositeChannels,function, number_parameters,parameters,exception); return(status); } MagickExport MagickBooleanType FunctionImageChannel(Image *image, const ChannelType channel,const MagickFunction function, const size_t number_parameters,const double *parameters, ExceptionInfo *exception) { #define FunctionImageTag "Function/Image " CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) { InheritException(exception,&image->exception); return(MagickFalse); } status=AccelerateFunctionImage(image,channel,function,number_parameters, parameters,exception); if (status != MagickFalse) return(status); status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,ApplyFunction(GetPixelRed(q),function, number_parameters,parameters,exception)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ApplyFunction(GetPixelGreen(q),function, number_parameters,parameters,exception)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ApplyFunction(GetPixelBlue(q),function, number_parameters,parameters,exception)); if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) SetPixelOpacity(q,ApplyFunction(GetPixelOpacity(q),function, number_parameters,parameters,exception)); else SetPixelAlpha(q,ApplyFunction((Quantum) GetPixelAlpha(q),function, number_parameters,parameters,exception)); } if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL)) SetPixelIndex(indexes+x,ApplyFunction(GetPixelIndex(indexes+x),function, number_parameters,parameters,exception)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FunctionImageChannel) #endif proceed=SetImageProgress(image,FunctionImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
% % % % + G e t I m a g e C h a n n e l E x t r e m a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelExtrema() returns the extrema of one or more image channels. % % The format of the GetImageChannelExtrema method is: % % MagickBooleanType GetImageChannelExtrema(const Image *image, % const ChannelType channel,size_t *minima,size_t *maxima, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o minima: the minimum value in the channel. % % o maxima: the maximum value in the channel. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageExtrema(const Image *image, size_t *minima,size_t *maxima,ExceptionInfo *exception) { return(GetImageChannelExtrema(image,CompositeChannels,minima,maxima,exception)); } MagickExport MagickBooleanType GetImageChannelExtrema(const Image *image, const ChannelType channel,size_t *minima,size_t *maxima, ExceptionInfo *exception) { double max, min; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=GetImageChannelRange(image,channel,&min,&max,exception); *minima=(size_t) ceil(min-0.5); *maxima=(size_t) floor(max+0.5); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l K u r t o s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelKurtosis() returns the kurtosis and skewness of one or more % image channels. % % The format of the GetImageChannelKurtosis method is: % % MagickBooleanType GetImageChannelKurtosis(const Image *image, % const ChannelType channel,double *kurtosis,double *skewness, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o kurtosis: the kurtosis of the channel. % % o skewness: the skewness of the channel. % % o exception: return any errors or warnings in this structure. 
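%
%  In terms of the power sums pooled over the selected channels, with m the
%  mean and s the standard deviation, the implementation computes
%
%    skewness = (E[x^3] - 3*m*E[x^2] + 2*m^3) / s^3
%    kurtosis = (E[x^4] - 4*m*E[x^3] + 6*m^2*E[x^2] - 3*m^4) / s^4 - 3
%
%  i.e. the excess kurtosis of the pooled samples.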
% */ MagickExport MagickBooleanType GetImageKurtosis(const Image *image, double *kurtosis,double *skewness,ExceptionInfo *exception) { MagickBooleanType status; status=GetImageChannelKurtosis(image,CompositeChannels,kurtosis,skewness, exception); return(status); } MagickExport MagickBooleanType GetImageChannelKurtosis(const Image *image, const ChannelType channel,double *kurtosis,double *skewness, ExceptionInfo *exception) { double area, mean, standard_deviation, sum_squares, sum_cubes, sum_fourth_power; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); *kurtosis=0.0; *skewness=0.0; area=0.0; mean=0.0; standard_deviation=0.0; sum_squares=0.0; sum_cubes=0.0; sum_fourth_power=0.0; for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) { mean+=GetPixelRed(p); sum_squares+=(double) GetPixelRed(p)*GetPixelRed(p); sum_cubes+=(double) GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p); sum_fourth_power+=(double) GetPixelRed(p)*GetPixelRed(p)* GetPixelRed(p)*GetPixelRed(p); area++; } if ((channel & GreenChannel) != 0) { mean+=GetPixelGreen(p); sum_squares+=(double) GetPixelGreen(p)*GetPixelGreen(p); sum_cubes+=(double) GetPixelGreen(p)*GetPixelGreen(p)* GetPixelGreen(p); sum_fourth_power+=(double) GetPixelGreen(p)*GetPixelGreen(p)* GetPixelGreen(p)*GetPixelGreen(p); area++; } if ((channel & BlueChannel) != 0) { mean+=GetPixelBlue(p); sum_squares+=(double) GetPixelBlue(p)*GetPixelBlue(p); sum_cubes+=(double) GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p); sum_fourth_power+=(double) GetPixelBlue(p)*GetPixelBlue(p)* GetPixelBlue(p)*GetPixelBlue(p); area++; } if ((channel & OpacityChannel) != 0) { mean+=GetPixelOpacity(p); sum_squares+=(double) GetPixelOpacity(p)*GetPixelOpacity(p); sum_cubes+=(double) GetPixelOpacity(p)*GetPixelOpacity(p)* GetPixelOpacity(p); sum_fourth_power+=(double) GetPixelOpacity(p)*GetPixelOpacity(p)* GetPixelOpacity(p)*GetPixelOpacity(p); area++; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { mean+=GetPixelIndex(indexes+x); sum_squares+=(double) GetPixelIndex(indexes+x)* GetPixelIndex(indexes+x); sum_cubes+=(double) GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)* GetPixelIndex(indexes+x); sum_fourth_power+=(double) GetPixelIndex(indexes+x)* GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)* GetPixelIndex(indexes+x); area++; } p++; } } if (y < (ssize_t) image->rows) return(MagickFalse); if (area != 0.0) { mean/=area; sum_squares/=area; sum_cubes/=area; sum_fourth_power/=area; } standard_deviation=sqrt(sum_squares-(mean*mean)); if (standard_deviation != 0.0) { *kurtosis=sum_fourth_power-4.0*mean*sum_cubes+6.0*mean*mean*sum_squares- 3.0*mean*mean*mean*mean; *kurtosis/=standard_deviation*standard_deviation*standard_deviation* standard_deviation; *kurtosis-=3.0; *skewness=sum_cubes-3.0*mean*sum_squares+2.0*mean*mean*mean; *skewness/=standard_deviation*standard_deviation*standard_deviation; } return(y == (ssize_t) image->rows ? 
MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l M e a n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelMean() returns the mean and standard deviation of one or more % image channels. % % The format of the GetImageChannelMean method is: % % MagickBooleanType GetImageChannelMean(const Image *image, % const ChannelType channel,double *mean,double *standard_deviation, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o mean: the average value in the channel. % % o standard_deviation: the standard deviation of the channel. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean, double *standard_deviation,ExceptionInfo *exception) { MagickBooleanType status; status=GetImageChannelMean(image,CompositeChannels,mean,standard_deviation, exception); return(status); } MagickExport MagickBooleanType GetImageChannelMean(const Image *image, const ChannelType channel,double *mean,double *standard_deviation, ExceptionInfo *exception) { ChannelStatistics *channel_statistics; size_t channels; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); channel_statistics=GetImageChannelStatistics(image,exception); if (channel_statistics == (ChannelStatistics *) NULL) return(MagickFalse); channels=0; channel_statistics[CompositeChannels].mean=0.0; channel_statistics[CompositeChannels].standard_deviation=0.0; if ((channel & RedChannel) != 0) { channel_statistics[CompositeChannels].mean+= channel_statistics[RedChannel].mean; channel_statistics[CompositeChannels].standard_deviation+= channel_statistics[RedChannel].variance- channel_statistics[RedChannel].mean* channel_statistics[RedChannel].mean; channels++; } if ((channel & GreenChannel) != 0) { channel_statistics[CompositeChannels].mean+= channel_statistics[GreenChannel].mean; channel_statistics[CompositeChannels].standard_deviation+= channel_statistics[GreenChannel].variance- channel_statistics[GreenChannel].mean* channel_statistics[GreenChannel].mean; channels++; } if ((channel & BlueChannel) != 0) { channel_statistics[CompositeChannels].mean+= channel_statistics[BlueChannel].mean; channel_statistics[CompositeChannels].standard_deviation+= channel_statistics[BlueChannel].variance- channel_statistics[BlueChannel].mean* channel_statistics[BlueChannel].mean; channels++; } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { channel_statistics[CompositeChannels].mean+= channel_statistics[OpacityChannel].mean; channel_statistics[CompositeChannels].standard_deviation+= channel_statistics[OpacityChannel].variance- channel_statistics[OpacityChannel].mean* channel_statistics[OpacityChannel].mean; channels++; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { channel_statistics[CompositeChannels].mean+= channel_statistics[BlackChannel].mean; channel_statistics[CompositeChannels].standard_deviation+= channel_statistics[BlackChannel].variance- channel_statistics[BlackChannel].mean* channel_statistics[BlackChannel].mean; channels++; } channel_statistics[CompositeChannels].mean/=channels; channel_statistics[CompositeChannels].standard_deviation= 
sqrt(channel_statistics[CompositeChannels].standard_deviation/channels); *mean=channel_statistics[CompositeChannels].mean; *standard_deviation=channel_statistics[CompositeChannels].standard_deviation; channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l M o m e n t s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelMoments() returns the normalized moments of one or more image % channels. % % The format of the GetImageChannelMoments method is: % % ChannelMoments *GetImageChannelMoments(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport ChannelMoments *GetImageChannelMoments(const Image *image, ExceptionInfo *exception) { #define MaxNumberImageMoments 8 ChannelMoments *channel_moments; double M00[CompositeChannels+1], M01[CompositeChannels+1], M02[CompositeChannels+1], M03[CompositeChannels+1], M10[CompositeChannels+1], M11[CompositeChannels+1], M12[CompositeChannels+1], M20[CompositeChannels+1], M21[CompositeChannels+1], M22[CompositeChannels+1], M30[CompositeChannels+1]; MagickPixelPacket pixel; PointInfo centroid[CompositeChannels+1]; ssize_t channel, channels, y; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); length=CompositeChannels+1UL; channel_moments=(ChannelMoments *) AcquireQuantumMemory(length, sizeof(*channel_moments)); if (channel_moments == (ChannelMoments *) NULL) return(channel_moments); (void) ResetMagickMemory(channel_moments,0,length*sizeof(*channel_moments)); (void) ResetMagickMemory(centroid,0,sizeof(centroid)); (void) ResetMagickMemory(M00,0,sizeof(M00)); (void) ResetMagickMemory(M01,0,sizeof(M01)); (void) ResetMagickMemory(M02,0,sizeof(M02)); (void) ResetMagickMemory(M03,0,sizeof(M03)); (void) ResetMagickMemory(M10,0,sizeof(M10)); (void) ResetMagickMemory(M11,0,sizeof(M11)); (void) ResetMagickMemory(M12,0,sizeof(M12)); (void) ResetMagickMemory(M20,0,sizeof(M20)); (void) ResetMagickMemory(M21,0,sizeof(M21)); (void) ResetMagickMemory(M22,0,sizeof(M22)); (void) ResetMagickMemory(M30,0,sizeof(M30)); GetMagickPixelPacket(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; /* Compute center of mass (centroid). 
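The raw moments are M_pq = sum over all pixels of x^p * y^q * (QuantumScale*value),
      accumulated per channel; the centroid then follows as (M10/M00, M01/M00), falling
      back to the image center when M00 is near zero.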
*/ p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,p,indexes+x,&pixel); M00[RedChannel]+=QuantumScale*pixel.red; M10[RedChannel]+=x*QuantumScale*pixel.red; M01[RedChannel]+=y*QuantumScale*pixel.red; M00[GreenChannel]+=QuantumScale*pixel.green; M10[GreenChannel]+=x*QuantumScale*pixel.green; M01[GreenChannel]+=y*QuantumScale*pixel.green; M00[BlueChannel]+=QuantumScale*pixel.blue; M10[BlueChannel]+=x*QuantumScale*pixel.blue; M01[BlueChannel]+=y*QuantumScale*pixel.blue; if (image->matte != MagickFalse) { M00[OpacityChannel]+=QuantumScale*pixel.opacity; M10[OpacityChannel]+=x*QuantumScale*pixel.opacity; M01[OpacityChannel]+=y*QuantumScale*pixel.opacity; } if (image->colorspace == CMYKColorspace) { M00[IndexChannel]+=QuantumScale*pixel.index; M10[IndexChannel]+=x*QuantumScale*pixel.index; M01[IndexChannel]+=y*QuantumScale*pixel.index; } p++; } } for (channel=0; channel <= CompositeChannels; channel++) { /* Compute center of mass (centroid). */ if (M00[channel] < MagickEpsilon) { M00[channel]+=MagickEpsilon; centroid[channel].x=(double) image->columns/2.0; centroid[channel].y=(double) image->rows/2.0; continue; } M00[channel]+=MagickEpsilon; centroid[channel].x=M10[channel]/M00[channel]; centroid[channel].y=M01[channel]/M00[channel]; } for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; /* Compute the image moments. */ p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,p,indexes+x,&pixel); M11[RedChannel]+=(x-centroid[RedChannel].x)*(y- centroid[RedChannel].y)*QuantumScale*pixel.red; M20[RedChannel]+=(x-centroid[RedChannel].x)*(x- centroid[RedChannel].x)*QuantumScale*pixel.red; M02[RedChannel]+=(y-centroid[RedChannel].y)*(y- centroid[RedChannel].y)*QuantumScale*pixel.red; M21[RedChannel]+=(x-centroid[RedChannel].x)*(x- centroid[RedChannel].x)*(y-centroid[RedChannel].y)*QuantumScale* pixel.red; M12[RedChannel]+=(x-centroid[RedChannel].x)*(y- centroid[RedChannel].y)*(y-centroid[RedChannel].y)*QuantumScale* pixel.red; M22[RedChannel]+=(x-centroid[RedChannel].x)*(x- centroid[RedChannel].x)*(y-centroid[RedChannel].y)*(y- centroid[RedChannel].y)*QuantumScale*pixel.red; M30[RedChannel]+=(x-centroid[RedChannel].x)*(x- centroid[RedChannel].x)*(x-centroid[RedChannel].x)*QuantumScale* pixel.red; M03[RedChannel]+=(y-centroid[RedChannel].y)*(y- centroid[RedChannel].y)*(y-centroid[RedChannel].y)*QuantumScale* pixel.red; M11[GreenChannel]+=(x-centroid[GreenChannel].x)*(y- centroid[GreenChannel].y)*QuantumScale*pixel.green; M20[GreenChannel]+=(x-centroid[GreenChannel].x)*(x- centroid[GreenChannel].x)*QuantumScale*pixel.green; M02[GreenChannel]+=(y-centroid[GreenChannel].y)*(y- centroid[GreenChannel].y)*QuantumScale*pixel.green; M21[GreenChannel]+=(x-centroid[GreenChannel].x)*(x- centroid[GreenChannel].x)*(y-centroid[GreenChannel].y)*QuantumScale* pixel.green; M12[GreenChannel]+=(x-centroid[GreenChannel].x)*(y- centroid[GreenChannel].y)*(y-centroid[GreenChannel].y)*QuantumScale* pixel.green; M22[GreenChannel]+=(x-centroid[GreenChannel].x)*(x- centroid[GreenChannel].x)*(y-centroid[GreenChannel].y)*(y- centroid[GreenChannel].y)*QuantumScale*pixel.green; 
M30[GreenChannel]+=(x-centroid[GreenChannel].x)*(x- centroid[GreenChannel].x)*(x-centroid[GreenChannel].x)*QuantumScale* pixel.green; M03[GreenChannel]+=(y-centroid[GreenChannel].y)*(y- centroid[GreenChannel].y)*(y-centroid[GreenChannel].y)*QuantumScale* pixel.green; M11[BlueChannel]+=(x-centroid[BlueChannel].x)*(y- centroid[BlueChannel].y)*QuantumScale*pixel.blue; M20[BlueChannel]+=(x-centroid[BlueChannel].x)*(x- centroid[BlueChannel].x)*QuantumScale*pixel.blue; M02[BlueChannel]+=(y-centroid[BlueChannel].y)*(y- centroid[BlueChannel].y)*QuantumScale*pixel.blue; M21[BlueChannel]+=(x-centroid[BlueChannel].x)*(x- centroid[BlueChannel].x)*(y-centroid[BlueChannel].y)*QuantumScale* pixel.blue; M12[BlueChannel]+=(x-centroid[BlueChannel].x)*(y- centroid[BlueChannel].y)*(y-centroid[BlueChannel].y)*QuantumScale* pixel.blue; M22[BlueChannel]+=(x-centroid[BlueChannel].x)*(x- centroid[BlueChannel].x)*(y-centroid[BlueChannel].y)*(y- centroid[BlueChannel].y)*QuantumScale*pixel.blue; M30[BlueChannel]+=(x-centroid[BlueChannel].x)*(x- centroid[BlueChannel].x)*(x-centroid[BlueChannel].x)*QuantumScale* pixel.blue; M03[BlueChannel]+=(y-centroid[BlueChannel].y)*(y- centroid[BlueChannel].y)*(y-centroid[BlueChannel].y)*QuantumScale* pixel.blue; if (image->matte != MagickFalse) { M11[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(y- centroid[OpacityChannel].y)*QuantumScale*pixel.opacity; M20[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x- centroid[OpacityChannel].x)*QuantumScale*pixel.opacity; M02[OpacityChannel]+=(y-centroid[OpacityChannel].y)*(y- centroid[OpacityChannel].y)*QuantumScale*pixel.opacity; M21[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x- centroid[OpacityChannel].x)*(y-centroid[OpacityChannel].y)* QuantumScale*pixel.opacity; M12[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(y- centroid[OpacityChannel].y)*(y-centroid[OpacityChannel].y)* QuantumScale*pixel.opacity; M22[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x- centroid[OpacityChannel].x)*(y-centroid[OpacityChannel].y)*(y- centroid[OpacityChannel].y)*QuantumScale*pixel.opacity; M30[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x- centroid[OpacityChannel].x)*(x-centroid[OpacityChannel].x)* QuantumScale*pixel.opacity; M03[OpacityChannel]+=(y-centroid[OpacityChannel].y)*(y- centroid[OpacityChannel].y)*(y-centroid[OpacityChannel].y)* QuantumScale*pixel.opacity; } if (image->colorspace == CMYKColorspace) { M11[IndexChannel]+=(x-centroid[IndexChannel].x)*(y- centroid[IndexChannel].y)*QuantumScale*pixel.index; M20[IndexChannel]+=(x-centroid[IndexChannel].x)*(x- centroid[IndexChannel].x)*QuantumScale*pixel.index; M02[IndexChannel]+=(y-centroid[IndexChannel].y)*(y- centroid[IndexChannel].y)*QuantumScale*pixel.index; M21[IndexChannel]+=(x-centroid[IndexChannel].x)*(x- centroid[IndexChannel].x)*(y-centroid[IndexChannel].y)* QuantumScale*pixel.index; M12[IndexChannel]+=(x-centroid[IndexChannel].x)*(y- centroid[IndexChannel].y)*(y-centroid[IndexChannel].y)* QuantumScale*pixel.index; M22[IndexChannel]+=(x-centroid[IndexChannel].x)*(x- centroid[IndexChannel].x)*(y-centroid[IndexChannel].y)*(y- centroid[IndexChannel].y)*QuantumScale*pixel.index; M30[IndexChannel]+=(x-centroid[IndexChannel].x)*(x- centroid[IndexChannel].x)*(x-centroid[IndexChannel].x)* QuantumScale*pixel.index; M03[IndexChannel]+=(y-centroid[IndexChannel].y)*(y- centroid[IndexChannel].y)*(y-centroid[IndexChannel].y)* QuantumScale*pixel.index; } p++; } } channels=3; M00[CompositeChannels]+=(M00[RedChannel]+M00[GreenChannel]+M00[BlueChannel]); 
M01[CompositeChannels]+=(M01[RedChannel]+M01[GreenChannel]+M01[BlueChannel]); M02[CompositeChannels]+=(M02[RedChannel]+M02[GreenChannel]+M02[BlueChannel]); M03[CompositeChannels]+=(M03[RedChannel]+M03[GreenChannel]+M03[BlueChannel]); M10[CompositeChannels]+=(M10[RedChannel]+M10[GreenChannel]+M10[BlueChannel]); M11[CompositeChannels]+=(M11[RedChannel]+M11[GreenChannel]+M11[BlueChannel]); M12[CompositeChannels]+=(M12[RedChannel]+M12[GreenChannel]+M12[BlueChannel]); M20[CompositeChannels]+=(M20[RedChannel]+M20[GreenChannel]+M20[BlueChannel]); M21[CompositeChannels]+=(M21[RedChannel]+M21[GreenChannel]+M21[BlueChannel]); M22[CompositeChannels]+=(M22[RedChannel]+M22[GreenChannel]+M22[BlueChannel]); M30[CompositeChannels]+=(M30[RedChannel]+M30[GreenChannel]+M30[BlueChannel]); if (image->matte != MagickFalse) { channels+=1; M00[CompositeChannels]+=M00[OpacityChannel]; M01[CompositeChannels]+=M01[OpacityChannel]; M02[CompositeChannels]+=M02[OpacityChannel]; M03[CompositeChannels]+=M03[OpacityChannel]; M10[CompositeChannels]+=M10[OpacityChannel]; M11[CompositeChannels]+=M11[OpacityChannel]; M12[CompositeChannels]+=M12[OpacityChannel]; M20[CompositeChannels]+=M20[OpacityChannel]; M21[CompositeChannels]+=M21[OpacityChannel]; M22[CompositeChannels]+=M22[OpacityChannel]; M30[CompositeChannels]+=M30[OpacityChannel]; } if (image->colorspace == CMYKColorspace) { channels+=1; M00[CompositeChannels]+=M00[IndexChannel]; M01[CompositeChannels]+=M01[IndexChannel]; M02[CompositeChannels]+=M02[IndexChannel]; M03[CompositeChannels]+=M03[IndexChannel]; M10[CompositeChannels]+=M10[IndexChannel]; M11[CompositeChannels]+=M11[IndexChannel]; M12[CompositeChannels]+=M12[IndexChannel]; M20[CompositeChannels]+=M20[IndexChannel]; M21[CompositeChannels]+=M21[IndexChannel]; M22[CompositeChannels]+=M22[IndexChannel]; M30[CompositeChannels]+=M30[IndexChannel]; } M00[CompositeChannels]/=(double) channels; M01[CompositeChannels]/=(double) channels; M02[CompositeChannels]/=(double) channels; M03[CompositeChannels]/=(double) channels; M10[CompositeChannels]/=(double) channels; M11[CompositeChannels]/=(double) channels; M12[CompositeChannels]/=(double) channels; M20[CompositeChannels]/=(double) channels; M21[CompositeChannels]/=(double) channels; M22[CompositeChannels]/=(double) channels; M30[CompositeChannels]/=(double) channels; for (channel=0; channel <= CompositeChannels; channel++) { /* Compute elliptical angle, major and minor axes, eccentricity, & intensity. */ channel_moments[channel].centroid=centroid[channel]; channel_moments[channel].ellipse_axis.x=sqrt((2.0/M00[channel])* ((M20[channel]+M02[channel])+sqrt(4.0*M11[channel]*M11[channel]+ (M20[channel]-M02[channel])*(M20[channel]-M02[channel])))); channel_moments[channel].ellipse_axis.y=sqrt((2.0/M00[channel])* ((M20[channel]+M02[channel])-sqrt(4.0*M11[channel]*M11[channel]+ (M20[channel]-M02[channel])*(M20[channel]-M02[channel])))); channel_moments[channel].ellipse_angle=RadiansToDegrees(0.5*atan(2.0* M11[channel]/(M20[channel]-M02[channel]+MagickEpsilon))); channel_moments[channel].ellipse_eccentricity=sqrt(1.0-( channel_moments[channel].ellipse_axis.y/ (channel_moments[channel].ellipse_axis.x+MagickEpsilon))); channel_moments[channel].ellipse_intensity=M00[channel]/ (MagickPI*channel_moments[channel].ellipse_axis.x* channel_moments[channel].ellipse_axis.y+MagickEpsilon); } for (channel=0; channel <= CompositeChannels; channel++) { /* Normalize image moments. 
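Each central moment M_pq is divided by M00^(1+(p+q)/2), the standard
      scale-invariant eta_pq normalization; M10 and M01 are zeroed and M00 is
      reset to 1 afterwards.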
*/ M10[channel]=0.0; M01[channel]=0.0; M11[channel]/=pow(M00[channel],1.0+(1.0+1.0)/2.0); M20[channel]/=pow(M00[channel],1.0+(2.0+0.0)/2.0); M02[channel]/=pow(M00[channel],1.0+(0.0+2.0)/2.0); M21[channel]/=pow(M00[channel],1.0+(2.0+1.0)/2.0); M12[channel]/=pow(M00[channel],1.0+(1.0+2.0)/2.0); M22[channel]/=pow(M00[channel],1.0+(2.0+2.0)/2.0); M30[channel]/=pow(M00[channel],1.0+(3.0+0.0)/2.0); M03[channel]/=pow(M00[channel],1.0+(0.0+3.0)/2.0); M00[channel]=1.0; } for (channel=0; channel <= CompositeChannels; channel++) { /* Compute Hu invariant moments. */ channel_moments[channel].I[0]=M20[channel]+M02[channel]; channel_moments[channel].I[1]=(M20[channel]-M02[channel])* (M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel]; channel_moments[channel].I[2]=(M30[channel]-3.0*M12[channel])* (M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])* (3.0*M21[channel]-M03[channel]); channel_moments[channel].I[3]=(M30[channel]+M12[channel])* (M30[channel]+M12[channel])+(M21[channel]+M03[channel])* (M21[channel]+M03[channel]); channel_moments[channel].I[4]=(M30[channel]-3.0*M12[channel])* (M30[channel]+M12[channel])*((M30[channel]+M12[channel])* (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])* (M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])* (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])* (M30[channel]+M12[channel])-(M21[channel]+M03[channel])* (M21[channel]+M03[channel])); channel_moments[channel].I[5]=(M20[channel]-M02[channel])* ((M30[channel]+M12[channel])*(M30[channel]+M12[channel])- (M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+ 4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]); channel_moments[channel].I[6]=(3.0*M21[channel]-M03[channel])* (M30[channel]+M12[channel])*((M30[channel]+M12[channel])* (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])* (M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])* (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])* (M30[channel]+M12[channel])-(M21[channel]+M03[channel])* (M21[channel]+M03[channel])); channel_moments[channel].I[7]=M11[channel]*((M30[channel]+M12[channel])* (M30[channel]+M12[channel])-(M03[channel]+M21[channel])* (M03[channel]+M21[channel]))-(M20[channel]-M02[channel])* (M30[channel]+M12[channel])*(M03[channel]+M21[channel]); } if (y < (ssize_t) image->rows) channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments); return(channel_moments); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l P e r c e p t u a l H a s h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelPerceptualHash() returns the perceptual hash of one or more % image channels. % % The format of the GetImageChannelPerceptualHash method is: % % ChannelPerceptualHash *GetImageChannelPerceptualHash(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
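%
%  For each channel the hash is built from the first seven Hu invariant
%  moments, computed once after blurring in sRGB (the P values) and once
%  after blurring in the HCLp colorspace (the Q values); each moment is
%  stored as -log10(|I|) so widely ranged invariants become comparable.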
% */ static inline double MagickLog10(const double x) { #define Log10Epsilon (1.0e-11) if (fabs(x) < Log10Epsilon) return(log10(Log10Epsilon)); return(log10(fabs(x))); } MagickExport ChannelPerceptualHash *GetImageChannelPerceptualHash( const Image *image,ExceptionInfo *exception) { ChannelMoments *moments; ChannelPerceptualHash *perceptual_hash; Image *hash_image; MagickBooleanType status; register ssize_t i; ssize_t channel; /* Blur then transform to sRGB colorspace. */ hash_image=BlurImage(image,0.0,1.0,exception); if (hash_image == (Image *) NULL) return((ChannelPerceptualHash *) NULL); hash_image->depth=8; status=TransformImageColorspace(hash_image,sRGBColorspace); if (status == MagickFalse) return((ChannelPerceptualHash *) NULL); moments=GetImageChannelMoments(hash_image,exception); hash_image=DestroyImage(hash_image); if (moments == (ChannelMoments *) NULL) return((ChannelPerceptualHash *) NULL); perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory( CompositeChannels+1UL,sizeof(*perceptual_hash)); if (perceptual_hash == (ChannelPerceptualHash *) NULL) return((ChannelPerceptualHash *) NULL); for (channel=0; channel <= CompositeChannels; channel++) for (i=0; i < 7; i++) perceptual_hash[channel].P[i]=(-MagickLog10(moments[channel].I[i])); moments=(ChannelMoments *) RelinquishMagickMemory(moments); /* Blur then transform to HCLp colorspace. */ hash_image=BlurImage(image,0.0,1.0,exception); if (hash_image == (Image *) NULL) { perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory( perceptual_hash); return((ChannelPerceptualHash *) NULL); } hash_image->depth=8; status=TransformImageColorspace(hash_image,HCLpColorspace); if (status == MagickFalse) { perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory( perceptual_hash); return((ChannelPerceptualHash *) NULL); } moments=GetImageChannelMoments(hash_image,exception); hash_image=DestroyImage(hash_image); if (moments == (ChannelMoments *) NULL) { perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory( perceptual_hash); return((ChannelPerceptualHash *) NULL); } for (channel=0; channel <= CompositeChannels; channel++) for (i=0; i < 7; i++) perceptual_hash[channel].Q[i]=(-MagickLog10(moments[channel].I[i])); moments=(ChannelMoments *) RelinquishMagickMemory(moments); return(perceptual_hash); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l R a n g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelRange() returns the range of one or more image channels. % % The format of the GetImageChannelRange method is: % % MagickBooleanType GetImageChannelRange(const Image *image, % const ChannelType channel,double *minima,double *maxima, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o minima: the minimum value in the channel. % % o maxima: the maximum value in the channel. % % o exception: return any errors or warnings in this structure. 
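%
%  For example (a minimal sketch):
%
%    double minima, maxima;
%    (void) GetImageChannelRange(image,RedChannel,&minima,&maxima,exception);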
% */ MagickExport MagickBooleanType GetImageRange(const Image *image, double *minima,double *maxima,ExceptionInfo *exception) { return(GetImageChannelRange(image,CompositeChannels,minima,maxima,exception)); } MagickExport MagickBooleanType GetImageChannelRange(const Image *image, const ChannelType channel,double *minima,double *maxima, ExceptionInfo *exception) { MagickPixelPacket pixel; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); *maxima=(-MagickMaximumValue); *minima=MagickMaximumValue; GetMagickPixelPacket(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,p,indexes+x,&pixel); if ((channel & RedChannel) != 0) { if (pixel.red < *minima) *minima=(double) pixel.red; if (pixel.red > *maxima) *maxima=(double) pixel.red; } if ((channel & GreenChannel) != 0) { if (pixel.green < *minima) *minima=(double) pixel.green; if (pixel.green > *maxima) *maxima=(double) pixel.green; } if ((channel & BlueChannel) != 0) { if (pixel.blue < *minima) *minima=(double) pixel.blue; if (pixel.blue > *maxima) *maxima=(double) pixel.blue; } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { if (pixel.opacity < *minima) *minima=(double) pixel.opacity; if (pixel.opacity > *maxima) *maxima=(double) pixel.opacity; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if ((double) pixel.index < *minima) *minima=(double) pixel.index; if ((double) pixel.index > *maxima) *maxima=(double) pixel.index; } p++; } } return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l S t a t i s t i c s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelStatistics() returns statistics for each channel in the % image. The statistics include the channel depth, its minima, maxima, mean, % standard deviation, kurtosis and skewness. You can access the red channel % mean, for example, like this: % % channel_statistics=GetImageChannelStatistics(image,exception); % red_mean=channel_statistics[RedChannel].mean; % % Use MagickRelinquishMemory() to free the statistics buffer. % % The format of the GetImageChannelStatistics method is: % % ChannelStatistics *GetImageChannelStatistics(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
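%
%  The channel depth reported here is the smallest bit depth at which every
%  sample in the channel is exactly representable (probed incrementally with
%  IsPixelAtDepth), capped at MAGICKCORE_QUANTUM_DEPTH.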
% */ MagickExport ChannelStatistics *GetImageChannelStatistics(const Image *image, ExceptionInfo *exception) { ChannelStatistics *channel_statistics; double area; QuantumAny range; register ssize_t i; size_t channels, depth, length; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); length=CompositeChannels+1UL; channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(length, sizeof(*channel_statistics)); if (channel_statistics == (ChannelStatistics *) NULL) return(channel_statistics); (void) ResetMagickMemory(channel_statistics,0,length* sizeof(*channel_statistics)); for (i=0; i <= (ssize_t) CompositeChannels; i++) { channel_statistics[i].depth=1; channel_statistics[i].maxima=(-MagickMaximumValue); channel_statistics[i].minima=MagickMaximumValue; } for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; ) { if (channel_statistics[RedChannel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[RedChannel].depth; range=GetQuantumRange(depth); if (IsPixelAtDepth(GetPixelRed(p),range) == MagickFalse) { channel_statistics[RedChannel].depth++; continue; } } if (channel_statistics[GreenChannel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[GreenChannel].depth; range=GetQuantumRange(depth); if (IsPixelAtDepth(GetPixelGreen(p),range) == MagickFalse) { channel_statistics[GreenChannel].depth++; continue; } } if (channel_statistics[BlueChannel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[BlueChannel].depth; range=GetQuantumRange(depth); if (IsPixelAtDepth(GetPixelBlue(p),range) == MagickFalse) { channel_statistics[BlueChannel].depth++; continue; } } if (image->matte != MagickFalse) { if (channel_statistics[OpacityChannel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[OpacityChannel].depth; range=GetQuantumRange(depth); if (IsPixelAtDepth(GetPixelOpacity(p),range) == MagickFalse) { channel_statistics[OpacityChannel].depth++; continue; } } } if (image->colorspace == CMYKColorspace) { if (channel_statistics[BlackChannel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[BlackChannel].depth; range=GetQuantumRange(depth); if (IsPixelAtDepth(GetPixelIndex(indexes+x),range) == MagickFalse) { channel_statistics[BlackChannel].depth++; continue; } } } if ((double) GetPixelRed(p) < channel_statistics[RedChannel].minima) channel_statistics[RedChannel].minima=(double) GetPixelRed(p); if ((double) GetPixelRed(p) > channel_statistics[RedChannel].maxima) channel_statistics[RedChannel].maxima=(double) GetPixelRed(p); channel_statistics[RedChannel].sum+=GetPixelRed(p); channel_statistics[RedChannel].sum_squared+=(double) GetPixelRed(p)* GetPixelRed(p); channel_statistics[RedChannel].sum_cubed+=(double) GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p); channel_statistics[RedChannel].sum_fourth_power+=(double) GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p); if ((double) GetPixelGreen(p) < channel_statistics[GreenChannel].minima) channel_statistics[GreenChannel].minima=(double) GetPixelGreen(p); if ((double) GetPixelGreen(p) > channel_statistics[GreenChannel].maxima) channel_statistics[GreenChannel].maxima=(double) 
GetPixelGreen(p); channel_statistics[GreenChannel].sum+=GetPixelGreen(p); channel_statistics[GreenChannel].sum_squared+=(double) GetPixelGreen(p)* GetPixelGreen(p); channel_statistics[GreenChannel].sum_cubed+=(double) GetPixelGreen(p)* GetPixelGreen(p)*GetPixelGreen(p); channel_statistics[GreenChannel].sum_fourth_power+=(double) GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p); if ((double) GetPixelBlue(p) < channel_statistics[BlueChannel].minima) channel_statistics[BlueChannel].minima=(double) GetPixelBlue(p); if ((double) GetPixelBlue(p) > channel_statistics[BlueChannel].maxima) channel_statistics[BlueChannel].maxima=(double) GetPixelBlue(p); channel_statistics[BlueChannel].sum+=GetPixelBlue(p); channel_statistics[BlueChannel].sum_squared+=(double) GetPixelBlue(p)* GetPixelBlue(p); channel_statistics[BlueChannel].sum_cubed+=(double) GetPixelBlue(p)* GetPixelBlue(p)*GetPixelBlue(p); channel_statistics[BlueChannel].sum_fourth_power+=(double) GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p); if (image->matte != MagickFalse) { if ((double) GetPixelOpacity(p) < channel_statistics[OpacityChannel].minima) channel_statistics[OpacityChannel].minima=(double) GetPixelOpacity(p); if ((double) GetPixelOpacity(p) > channel_statistics[OpacityChannel].maxima) channel_statistics[OpacityChannel].maxima=(double) GetPixelOpacity(p); channel_statistics[OpacityChannel].sum+=GetPixelOpacity(p); channel_statistics[OpacityChannel].sum_squared+=(double) GetPixelOpacity(p)*GetPixelOpacity(p); channel_statistics[OpacityChannel].sum_cubed+=(double) GetPixelOpacity(p)*GetPixelOpacity(p)*GetPixelOpacity(p); channel_statistics[OpacityChannel].sum_fourth_power+=(double) GetPixelOpacity(p)*GetPixelOpacity(p)*GetPixelOpacity(p)* GetPixelOpacity(p); } if (image->colorspace == CMYKColorspace) { if ((double) GetPixelIndex(indexes+x) < channel_statistics[BlackChannel].minima) channel_statistics[BlackChannel].minima=(double) GetPixelIndex(indexes+x); if ((double) GetPixelIndex(indexes+x) > channel_statistics[BlackChannel].maxima) channel_statistics[BlackChannel].maxima=(double) GetPixelIndex(indexes+x); channel_statistics[BlackChannel].sum+=GetPixelIndex(indexes+x); channel_statistics[BlackChannel].sum_squared+=(double) GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x); channel_statistics[BlackChannel].sum_cubed+=(double) GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)* GetPixelIndex(indexes+x); channel_statistics[BlackChannel].sum_fourth_power+=(double) GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)* GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x); } x++; p++; } } area=(double) image->columns*image->rows; for (i=0; i < (ssize_t) CompositeChannels; i++) { double sum; // 'Fix' for Visual Studio compiler optimization issue. 
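    /*
      Convert the accumulated power sums into per-channel statistics: the mean
      is E[x], the variance field holds E[x^2], and the standard deviation is
      sqrt(E[x^2]-mean*mean); the CompositeChannels entry pools these values
      across the active channels below.
    */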
sum = channel_statistics[i].sum/area; channel_statistics[i].sum=sum; channel_statistics[i].sum_squared/=area; channel_statistics[i].sum_cubed/=area; channel_statistics[i].sum_fourth_power/=area; channel_statistics[i].mean=sum; channel_statistics[i].variance=channel_statistics[i].sum_squared; channel_statistics[i].standard_deviation=sqrt( channel_statistics[i].variance-(sum*sum)); } for (i=0; i < (ssize_t) CompositeChannels; i++) { channel_statistics[CompositeChannels].depth=(size_t) EvaluateMax((double) channel_statistics[CompositeChannels].depth,(double) channel_statistics[i].depth); channel_statistics[CompositeChannels].minima=MagickMin( channel_statistics[CompositeChannels].minima, channel_statistics[i].minima); channel_statistics[CompositeChannels].maxima=EvaluateMax( channel_statistics[CompositeChannels].maxima, channel_statistics[i].maxima); channel_statistics[CompositeChannels].sum+=channel_statistics[i].sum; channel_statistics[CompositeChannels].sum_squared+= channel_statistics[i].sum_squared; channel_statistics[CompositeChannels].sum_cubed+= channel_statistics[i].sum_cubed; channel_statistics[CompositeChannels].sum_fourth_power+= channel_statistics[i].sum_fourth_power; channel_statistics[CompositeChannels].mean+=channel_statistics[i].mean; channel_statistics[CompositeChannels].variance+= channel_statistics[i].variance-channel_statistics[i].mean* channel_statistics[i].mean; channel_statistics[CompositeChannels].standard_deviation+= channel_statistics[i].variance-channel_statistics[i].mean* channel_statistics[i].mean; } channels=3; if (image->matte != MagickFalse) channels++; if (image->colorspace == CMYKColorspace) channels++; channel_statistics[CompositeChannels].sum/=channels; channel_statistics[CompositeChannels].sum_squared/=channels; channel_statistics[CompositeChannels].sum_cubed/=channels; channel_statistics[CompositeChannels].sum_fourth_power/=channels; channel_statistics[CompositeChannels].mean/=channels; channel_statistics[CompositeChannels].variance/=channels; channel_statistics[CompositeChannels].standard_deviation= sqrt(channel_statistics[CompositeChannels].standard_deviation/channels); channel_statistics[CompositeChannels].kurtosis/=channels; channel_statistics[CompositeChannels].skewness/=channels; for (i=0; i <= (ssize_t) CompositeChannels; i++) { if (channel_statistics[i].standard_deviation == 0.0) continue; channel_statistics[i].skewness=(channel_statistics[i].sum_cubed- 3.0*channel_statistics[i].mean*channel_statistics[i].sum_squared+ 2.0*channel_statistics[i].mean*channel_statistics[i].mean* channel_statistics[i].mean)/(channel_statistics[i].standard_deviation* channel_statistics[i].standard_deviation* channel_statistics[i].standard_deviation); channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power- 4.0*channel_statistics[i].mean*channel_statistics[i].sum_cubed+ 6.0*channel_statistics[i].mean*channel_statistics[i].mean* channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean* channel_statistics[i].mean*1.0*channel_statistics[i].mean* channel_statistics[i].mean)/(channel_statistics[i].standard_deviation* channel_statistics[i].standard_deviation* channel_statistics[i].standard_deviation* channel_statistics[i].standard_deviation)-3.0; } if (y < (ssize_t) image->rows) channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(channel_statistics); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o l y n o m i a l I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PolynomialImage() returns a new image where each pixel is the sum of the % pixels in the image sequence after applying its corresponding terms % (coefficient and degree pairs). % % The format of the PolynomialImage method is: % % Image *PolynomialImage(const Image *images,const size_t number_terms, % const double *terms,ExceptionInfo *exception) % Image *PolynomialImageChannel(const Image *images, % const size_t number_terms,const ChannelType channel, % const double *terms,ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o channel: the channel. % % o number_terms: the number of terms in the list. The actual list length % is 2 x number_terms + 1 (the constant). % % o terms: the list of polynomial coefficients and degree pairs and a % constant. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *PolynomialImage(const Image *images, const size_t number_terms,const double *terms,ExceptionInfo *exception) { Image *polynomial_image; polynomial_image=PolynomialImageChannel(images,DefaultChannels,number_terms, terms,exception); return(polynomial_image); } MagickExport Image *PolynomialImageChannel(const Image *images, const ChannelType channel,const size_t number_terms,const double *terms, ExceptionInfo *exception) { #define PolynomialImageTag "Polynomial/Image" CacheView *polynomial_view; Image *image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket **restrict polynomial_pixels, zero; size_t number_images; ssize_t y; assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=CloneImage(images,images->columns,images->rows,MagickTrue,exception); if (image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(image,DirectClass) == MagickFalse) { InheritException(exception,&image->exception); image=DestroyImage(image); return((Image *) NULL); } number_images=GetImageListLength(images); polynomial_pixels=AcquirePixelThreadSet(images,number_images); if (polynomial_pixels == (MagickPixelPacket **) NULL) { image=DestroyImage(image); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return((Image *) NULL); } /* Polynomial image pixels. 
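For each term pair (coefficient c_i, degree d_i), taken from the next image in the
    sequence, the accumulator gains c_i*pow(QuantumScale*p, d_i) per channel; the summed
    result is scaled back by QuantumRange and clamped when the row is written out.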
*/ status=MagickTrue; progress=0; GetMagickPixelPacket(images,&zero); polynomial_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { CacheView *image_view; const Image *next; const int id = GetOpenMPThreadId(); register IndexPacket *restrict polynomial_indexes; register MagickPixelPacket *polynomial_pixel; register PixelPacket *restrict q; register ssize_t i, x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } polynomial_indexes=GetCacheViewAuthenticIndexQueue(polynomial_view); polynomial_pixel=polynomial_pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) polynomial_pixel[x]=zero; next=images; for (i=0; i < (ssize_t) number_images; i++) { register const IndexPacket *indexes; register const PixelPacket *p; if (i >= (ssize_t) number_terms) break; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { image_view=DestroyCacheView(image_view); break; } indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { double coefficient, degree; coefficient=terms[i << 1]; degree=terms[(i << 1)+1]; if ((channel & RedChannel) != 0) polynomial_pixel[x].red+=coefficient*pow(QuantumScale*p->red,degree); if ((channel & GreenChannel) != 0) polynomial_pixel[x].green+=coefficient*pow(QuantumScale*p->green, degree); if ((channel & BlueChannel) != 0) polynomial_pixel[x].blue+=coefficient*pow(QuantumScale*p->blue, degree); if ((channel & OpacityChannel) != 0) polynomial_pixel[x].opacity+=coefficient*pow(QuantumScale* (QuantumRange-p->opacity),degree); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) polynomial_pixel[x].index+=coefficient*pow(QuantumScale*indexes[x], degree); p++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].red)); SetPixelGreen(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].green)); SetPixelBlue(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].blue)); if (image->matte == MagickFalse) SetPixelOpacity(q,ClampToQuantum(QuantumRange-QuantumRange* polynomial_pixel[x].opacity)); else SetPixelAlpha(q,ClampToQuantum(QuantumRange-QuantumRange* polynomial_pixel[x].opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(polynomial_indexes+x,ClampToQuantum(QuantumRange* polynomial_pixel[x].index)); q++; } if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_PolynomialImages) #endif proceed=SetImageProgress(images,PolynomialImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } polynomial_view=DestroyCacheView(polynomial_view); polynomial_pixels=DestroyPixelThreadSet(polynomial_pixels); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t a t i s t i c I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % StatisticImage() makes each pixel the min / max / median / mode / etc. of % the neighborhood of the specified width and height. % % The format of the StatisticImage method is: % % Image *StatisticImage(const Image *image,const StatisticType type, % const size_t width,const size_t height,ExceptionInfo *exception) % Image *StatisticImageChannel(const Image *image, % const ChannelType channel,const StatisticType type, % const size_t width,const size_t height,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the image channel. % % o type: the statistic type (median, mode, etc.). % % o width: the width of the pixel neighborhood. % % o height: the height of the pixel neighborhood. % % o exception: return any errors or warnings in this structure. % */ #define ListChannels 5 typedef struct _ListNode { size_t next[9], count, signature; } ListNode; typedef struct _SkipList { ssize_t level; ListNode *nodes; } SkipList; typedef struct _PixelList { size_t length, seed, signature; SkipList lists[ListChannels]; } PixelList; static PixelList *DestroyPixelList(PixelList *pixel_list) { register ssize_t i; if (pixel_list == (PixelList *) NULL) return((PixelList *) NULL); for (i=0; i < ListChannels; i++) if (pixel_list->lists[i].nodes != (ListNode *) NULL) pixel_list->lists[i].nodes=(ListNode *) RelinquishMagickMemory( pixel_list->lists[i].nodes); pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list); return(pixel_list); } static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list) { register ssize_t i; assert(pixel_list != (PixelList **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixel_list[i] != (PixelList *) NULL) pixel_list[i]=DestroyPixelList(pixel_list[i]); pixel_list=(PixelList **) RelinquishMagickMemory(pixel_list); return(pixel_list); } static PixelList *AcquirePixelList(const size_t width,const size_t height) { PixelList *pixel_list; register ssize_t i; pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list)); if (pixel_list == (PixelList *) NULL) return(pixel_list); (void) ResetMagickMemory((void *) pixel_list,0,sizeof(*pixel_list)); pixel_list->length=width*height; for (i=0; i < ListChannels; i++) { pixel_list->lists[i].nodes=(ListNode *) AcquireQuantumMemory(65537UL, sizeof(*pixel_list->lists[i].nodes)); if (pixel_list->lists[i].nodes == (ListNode *) NULL) return(DestroyPixelList(pixel_list)); (void) ResetMagickMemory(pixel_list->lists[i].nodes,0,65537UL* sizeof(*pixel_list->lists[i].nodes)); } pixel_list->signature=MagickSignature; return(pixel_list); } static PixelList **AcquirePixelListThreadSet(const size_t width, const size_t height) { PixelList **pixel_list; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixel_list=(PixelList **) AcquireQuantumMemory(number_threads, sizeof(*pixel_list)); if (pixel_list == (PixelList **) NULL) return((PixelList **) NULL); (void) ResetMagickMemory(pixel_list,0,number_threads*sizeof(*pixel_list)); for (i=0; i < (ssize_t) number_threads; i++) { pixel_list[i]=AcquirePixelList(width,height); if (pixel_list[i] == (PixelList *) NULL) return(DestroyPixelListThreadSet(pixel_list)); } return(pixel_list); } static void AddNodePixelList(PixelList *pixel_list,const ssize_t channel, const size_t color) { register SkipList *list; register ssize_t level; size_t search, update[9]; /* Initialize the node. 
*/ list=pixel_list->lists+channel; list->nodes[color].signature=pixel_list->signature; list->nodes[color].count=1; /* Determine where it belongs in the list. */ search=65536UL; for (level=list->level; level >= 0; level--) { while (list->nodes[search].next[level] < color) search=list->nodes[search].next[level]; update[level]=search; } /* Generate a pseudo-random level for this node. */ for (level=0; ; level++) { pixel_list->seed=(pixel_list->seed*42893621L)+1L; if ((pixel_list->seed & 0x300) != 0x300) break; } if (level > 8) level=8; if (level > (list->level+2)) level=list->level+2; /* If we're raising the list's level, link back to the root node. */ while (level > list->level) { list->level++; update[list->level]=65536UL; } /* Link the node into the skip-list. */ do { list->nodes[color].next[level]=list->nodes[update[level]].next[level]; list->nodes[update[level]].next[level]=color; } while (level-- > 0); } static void GetMaximumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel) { register SkipList *list; register ssize_t channel; size_t color, maximum; ssize_t count; unsigned short channels[ListChannels]; /* Find the maximum value for each of the color. */ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; color=65536L; count=0; maximum=list->nodes[color].next[0]; do { color=list->nodes[color].next[0]; if (color > maximum) maximum=color; count+=list->nodes[color].count; } while (count < (ssize_t) pixel_list->length); channels[channel]=(unsigned short) maximum; } pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]); pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]); pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]); pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]); pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]); } static void GetMeanPixelList(PixelList *pixel_list,MagickPixelPacket *pixel) { MagickRealType sum; register SkipList *list; register ssize_t channel; size_t color; ssize_t count; unsigned short channels[ListChannels]; /* Find the mean value for each of the color. */ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; color=65536L; count=0; sum=0.0; do { color=list->nodes[color].next[0]; sum+=(MagickRealType) list->nodes[color].count*color; count+=list->nodes[color].count; } while (count < (ssize_t) pixel_list->length); sum/=pixel_list->length; channels[channel]=(unsigned short) sum; } pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]); pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]); pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]); pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]); pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]); } static void GetMedianPixelList(PixelList *pixel_list,MagickPixelPacket *pixel) { register SkipList *list; register ssize_t channel; size_t color; ssize_t count; unsigned short channels[ListChannels]; /* Find the median value for each of the color. 
*/ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; color=65536L; count=0; do { color=list->nodes[color].next[0]; count+=list->nodes[color].count; } while (count <= (ssize_t) (pixel_list->length >> 1)); channels[channel]=(unsigned short) color; } GetMagickPixelPacket((const Image *) NULL,pixel); pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]); pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]); pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]); pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]); pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]); } static void GetMinimumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel) { register SkipList *list; register ssize_t channel; size_t color, minimum; ssize_t count; unsigned short channels[ListChannels]; /* Find the minimum value for each of the color. */ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; count=0; color=65536UL; minimum=list->nodes[color].next[0]; do { color=list->nodes[color].next[0]; if (color < minimum) minimum=color; count+=list->nodes[color].count; } while (count < (ssize_t) pixel_list->length); channels[channel]=(unsigned short) minimum; } pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]); pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]); pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]); pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]); pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]); } static void GetModePixelList(PixelList *pixel_list,MagickPixelPacket *pixel) { register SkipList *list; register ssize_t channel; size_t color, max_count, mode; ssize_t count; unsigned short channels[5]; /* Make each pixel the 'predominant color' of the specified neighborhood. */ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; color=65536L; mode=color; max_count=list->nodes[mode].count; count=0; do { color=list->nodes[color].next[0]; if (list->nodes[color].count > max_count) { mode=color; max_count=list->nodes[mode].count; } count+=list->nodes[color].count; } while (count < (ssize_t) pixel_list->length); channels[channel]=(unsigned short) mode; } pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]); pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]); pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]); pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]); pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]); } static void GetNonpeakPixelList(PixelList *pixel_list,MagickPixelPacket *pixel) { register SkipList *list; register ssize_t channel; size_t color, next, previous; ssize_t count; unsigned short channels[5]; /* Finds the non peak value for each of the colors. 
*/ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; color=65536L; next=list->nodes[color].next[0]; count=0; do { previous=color; color=next; next=list->nodes[color].next[0]; count+=list->nodes[color].count; } while (count <= (ssize_t) (pixel_list->length >> 1)); if ((previous == 65536UL) && (next != 65536UL)) color=next; else if ((previous != 65536UL) && (next == 65536UL)) color=previous; channels[channel]=(unsigned short) color; } pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]); pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]); pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]); pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]); pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]); } static void GetStandardDeviationPixelList(PixelList *pixel_list, MagickPixelPacket *pixel) { MagickRealType sum, sum_squared; register SkipList *list; register ssize_t channel; size_t color; ssize_t count; unsigned short channels[ListChannels]; /* Find the standard-deviation value for each of the color. */ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; color=65536L; count=0; sum=0.0; sum_squared=0.0; do { register ssize_t i; color=list->nodes[color].next[0]; sum+=(MagickRealType) list->nodes[color].count*color; for (i=0; i < (ssize_t) list->nodes[color].count; i++) sum_squared+=((MagickRealType) color)*((MagickRealType) color); count+=list->nodes[color].count; } while (count < (ssize_t) pixel_list->length); sum/=pixel_list->length; sum_squared/=pixel_list->length; channels[channel]=(unsigned short) sqrt(sum_squared-(sum*sum)); } pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]); pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]); pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]); pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]); pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]); } static inline void InsertPixelList(const Image *image,const PixelPacket *pixel, const IndexPacket *indexes,PixelList *pixel_list) { size_t signature; unsigned short index; index=ScaleQuantumToShort(GetPixelRed(pixel)); signature=pixel_list->lists[0].nodes[index].signature; if (signature == pixel_list->signature) pixel_list->lists[0].nodes[index].count++; else AddNodePixelList(pixel_list,0,index); index=ScaleQuantumToShort(GetPixelGreen(pixel)); signature=pixel_list->lists[1].nodes[index].signature; if (signature == pixel_list->signature) pixel_list->lists[1].nodes[index].count++; else AddNodePixelList(pixel_list,1,index); index=ScaleQuantumToShort(GetPixelBlue(pixel)); signature=pixel_list->lists[2].nodes[index].signature; if (signature == pixel_list->signature) pixel_list->lists[2].nodes[index].count++; else AddNodePixelList(pixel_list,2,index); index=ScaleQuantumToShort(GetPixelOpacity(pixel)); signature=pixel_list->lists[3].nodes[index].signature; if (signature == pixel_list->signature) pixel_list->lists[3].nodes[index].count++; else AddNodePixelList(pixel_list,3,index); if (image->colorspace == CMYKColorspace) index=ScaleQuantumToShort(GetPixelIndex(indexes)); signature=pixel_list->lists[4].nodes[index].signature; if (signature == pixel_list->signature) pixel_list->lists[4].nodes[index].count++; else AddNodePixelList(pixel_list,4,index); } static inline MagickRealType MagickAbsoluteValue(const MagickRealType x) { if (x < 0) return(-x); return(x); } static void ResetPixelList(PixelList *pixel_list) { int level; register ListNode *root; register SkipList 
*list; register ssize_t channel; /* Reset the skip-list. */ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; root=list->nodes+65536UL; list->level=0; for (level=0; level < 9; level++) root->next[level]=65536UL; } pixel_list->seed=pixel_list->signature++; } MagickExport Image *StatisticImage(const Image *image,const StatisticType type, const size_t width,const size_t height,ExceptionInfo *exception) { Image *statistic_image; statistic_image=StatisticImageChannel(image,DefaultChannels,type,width, height,exception); return(statistic_image); } MagickExport Image *StatisticImageChannel(const Image *image, const ChannelType channel,const StatisticType type,const size_t width, const size_t height,ExceptionInfo *exception) { #define StatisticImageTag "Statistic/Image" CacheView *image_view, *statistic_view; Image *statistic_image; MagickBooleanType status; MagickOffsetType progress; PixelList **restrict pixel_list; size_t neighbor_height, neighbor_width; ssize_t y; /* Initialize statistics image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); statistic_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (statistic_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(statistic_image,DirectClass) == MagickFalse) { InheritException(exception,&statistic_image->exception); statistic_image=DestroyImage(statistic_image); return((Image *) NULL); } neighbor_width=width == 0 ? GetOptimalKernelWidth2D((double) width,0.5) : width; neighbor_height=height == 0 ? GetOptimalKernelWidth2D((double) height,0.5) : height; pixel_list=AcquirePixelListThreadSet(neighbor_width,neighbor_height); if (pixel_list == (PixelList **) NULL) { statistic_image=DestroyImage(statistic_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Make each pixel the min / max / median / mode / etc. of the neighborhood. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); statistic_view=AcquireAuthenticCacheView(statistic_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,statistic_image,statistic_image->rows,1) #endif for (y=0; y < (ssize_t) statistic_image->rows; y++) { const int id = GetOpenMPThreadId(); register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register IndexPacket *restrict statistic_indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) neighbor_width/2L),y- (ssize_t) (neighbor_height/2L),image->columns+neighbor_width, neighbor_height,exception); q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); statistic_indexes=GetCacheViewAuthenticIndexQueue(statistic_view); for (x=0; x < (ssize_t) statistic_image->columns; x++) { MagickPixelPacket pixel; register const IndexPacket *restrict s; register const PixelPacket *restrict r; register ssize_t u, v; r=p; s=indexes+x; ResetPixelList(pixel_list[id]); for (v=0; v < (ssize_t) neighbor_height; v++) { for (u=0; u < (ssize_t) neighbor_width; u++) InsertPixelList(image,r+u,s+u,pixel_list[id]); r+=image->columns+neighbor_width; s+=image->columns+neighbor_width; } GetMagickPixelPacket(image,&pixel); SetMagickPixelPacket(image,p+neighbor_width*neighbor_height/2,indexes+x+ neighbor_width*neighbor_height/2,&pixel); switch (type) { case GradientStatistic: { MagickPixelPacket maximum, minimum; GetMinimumPixelList(pixel_list[id],&pixel); minimum=pixel; GetMaximumPixelList(pixel_list[id],&pixel); maximum=pixel; pixel.red=MagickAbsoluteValue(maximum.red-minimum.red); pixel.green=MagickAbsoluteValue(maximum.green-minimum.green); pixel.blue=MagickAbsoluteValue(maximum.blue-minimum.blue); pixel.opacity=MagickAbsoluteValue(maximum.opacity-minimum.opacity); if (image->colorspace == CMYKColorspace) pixel.index=MagickAbsoluteValue(maximum.index-minimum.index); break; } case MaximumStatistic: { GetMaximumPixelList(pixel_list[id],&pixel); break; } case MeanStatistic: { GetMeanPixelList(pixel_list[id],&pixel); break; } case MedianStatistic: default: { GetMedianPixelList(pixel_list[id],&pixel); break; } case MinimumStatistic: { GetMinimumPixelList(pixel_list[id],&pixel); break; } case ModeStatistic: { GetModePixelList(pixel_list[id],&pixel); break; } case NonpeakStatistic: { GetNonpeakPixelList(pixel_list[id],&pixel); break; } case StandardDeviationStatistic: { GetStandardDeviationPixelList(pixel_list[id],&pixel); break; } } if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(pixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(statistic_indexes+x,ClampToQuantum(pixel.index)); p++; q++; } if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp 
critical (MagickCore_StatisticImage) #endif proceed=SetImageProgress(image,StatisticImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } statistic_view=DestroyCacheView(statistic_view); image_view=DestroyCacheView(image_view); pixel_list=DestroyPixelListThreadSet(pixel_list); if (status == MagickFalse) statistic_image=DestroyImage(statistic_image); return(statistic_image); }
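The terms list documented earlier for PolynomialImage() is a flat array of coefficient/degree pairs, one pair per input image, followed by a trailing constant, so it holds 2 x number_terms + 1 doubles. A minimal fragment illustrating that layout (hypothetical values, not from the source; it assumes `images` already holds a two-image sequence and `exception` is an ExceptionInfo acquired in the usual way):

/*
  Per the description above, with normalized pixel intensities this requests
  roughly: result = 0.5*I1^2 + 0.25*I2 + 0.1 over the default channels.
  terms[2*i] is the coefficient and terms[2*i+1] the degree for image i,
  with one trailing constant, giving 2*number_terms+1 entries in total.
*/
const double terms[] = { 0.5, 2.0, 0.25, 1.0, 0.1 };
Image *poly = PolynomialImage(images, 2, terms, exception);
/* The channel variant takes the channel argument before number_terms,
   matching the definition above. */
Image *poly_rgb = PolynomialImageChannel(images,
  (ChannelType) (RedChannel | GreenChannel | BlueChannel), 2, terms, exception);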
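StatisticImage(), defined just above, is the public entry point for the neighborhood min / max / median / mode filters. A minimal usage sketch, not part of the library, assuming the standard MagickCore setup calls and header path:

#include <magick/MagickCore.h>  /* header location assumed; adjust to the build */

int main(int argc, char **argv)
{
  ExceptionInfo *exception;
  Image *image, *median;
  ImageInfo *image_info;

  (void) argc;
  MagickCoreGenesis(*argv, MagickTrue);
  exception = AcquireExceptionInfo();
  image_info = CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename, "input.png", MaxTextExtent);
  image = ReadImage(image_info, exception);
  if (image != (Image *) NULL)
    {
      /* replace each pixel with the median of its 5x5 neighborhood */
      median = StatisticImage(image, MedianStatistic, 5, 5, exception);
      if (median != (Image *) NULL)
        {
          (void) CopyMagickString(median->filename, "median.png", MaxTextExtent);
          (void) WriteImage(image_info, median);
          median = DestroyImage(median);
        }
      image = DestroyImage(image);
    }
  image_info = DestroyImageInfo(image_info);
  exception = DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}

The width and height of 5 are the documented neighborhood parameters; passing 0 for either falls back to GetOptimalKernelWidth2D(), as in the implementation above.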
THTensorMath.c
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/THTensorMath.c" #else #define TH_OMP_OVERHEAD_THRESHOLD 100000 void THTensor_(fill)(THTensor *r_, real value) { TH_TENSOR_APPLY(real, r_, THVector_(fill)(r__data, value, r__size); break;); } void THTensor_(zero)(THTensor *r_) { TH_TENSOR_APPLY(real, r_, THVector_(fill)(r__data, 0, r__size); break;); } void THTensor_(maskedFill)(THTensor *tensor, THByteTensor *mask, real value) { TH_TENSOR_APPLY2(real, tensor, unsigned char, mask, if (*mask_data > 1) THError("Mask tensor can take 0 and 1 values only"); else if (*mask_data == 1) *tensor_data = value;); } void THTensor_(maskedCopy)(THTensor *tensor, THByteTensor *mask, THTensor* src ) { THTensor *srct = THTensor_(newContiguous)(src); real *src_data = THTensor_(data)(srct); long cntr = 0; long nelem = THTensor_(nElement)(srct); if (THTensor_(nElement)(tensor) != THByteTensor_nElement(mask)) { THTensor_(free)(srct); THError("Number of elements of destination tensor != Number of elements in mask"); } TH_TENSOR_APPLY2(real, tensor, unsigned char, mask, if (*mask_data > 1) { THTensor_(free)(srct); THError("Mask tensor can take 0 and 1 values only"); } else if (*mask_data == 1) { if (cntr == nelem) { THTensor_(free)(srct); THError("Number of elements of src < number of ones in mask"); } *tensor_data = *src_data; src_data++; cntr++; }); THTensor_(free)(srct); } void THTensor_(maskedSelect)(THTensor *tensor, THTensor *src, THByteTensor *mask) { long numel = THByteTensor_sumall(mask); real *tensor_data; THTensor_(resize1d)(tensor,numel); tensor_data = THTensor_(data)(tensor); TH_TENSOR_APPLY2(real, src, unsigned char, mask, if (*mask_data > 1) { THError("Mask tensor can take 0 and 1 values only"); } else if (*mask_data == 1) { *tensor_data = *src_data; tensor_data++; }); } // Finds non-zero elements of a tensor and returns their subscripts void THTensor_(nonzero)(THLongTensor *subscript, THTensor *tensor) { long numel = 0; long *subscript_data; long i = 0; long dim; long div = 1; /* First Pass to determine size of subscripts */ TH_TENSOR_APPLY(real, tensor, if (*tensor_data != 0) { ++numel; }); THLongTensor_resize2d(subscript, numel, tensor->nDimension); /* Second pass populates subscripts */ subscript_data = THLongTensor_data(subscript); TH_TENSOR_APPLY(real, tensor, if (*tensor_data != 0) { div = 1; for (dim = tensor->nDimension - 1; dim >= 0; dim--) { *(subscript_data + dim) = (i/div) % tensor->size[dim]; div *= tensor->size[dim]; } subscript_data += tensor->nDimension; } ++i;); } void THTensor_(indexSelect)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index) { long i, numel; THLongStorage *newSize; THTensor *tSlice, *sSlice; long *index_data; real *tensor_data, *src_data; THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector"); THArgCheck(dim < src->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim+1); THArgCheck(src->nDimension > 0,2,"Source tensor is empty"); numel = THLongTensor_nElement(index); newSize = THLongStorage_newWithSize(src->nDimension); THLongStorage_rawCopy(newSize,src->size); newSize->data[dim] = numel; THTensor_(resize)(tensor,newSize,NULL); THLongStorage_free(newSize); index = THLongTensor_newContiguous(index); index_data = THLongTensor_data(index); if (dim == 0 && THTensor_(isContiguous)(src) && THTensor_(isContiguous)(tensor)) { tensor_data = THTensor_(data)(tensor); src_data = THTensor_(data)(src); long rowsize = THTensor_(nElement)(src) / src->size[0]; // check that the indices are within range long max = src->size[0]; for 
(i=0; i<numel; i++) { if (index_data[i] < 1 || index_data[i] > max) { THLongTensor_free(index); THError("index out of range"); } } if (src->nDimension == 1) { #pragma omp parallel for if(numel > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<numel; i++) tensor_data[i] = src_data[index_data[i]-1]; } else { #pragma omp parallel for if(numel*rowsize > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<numel; i++) memcpy(tensor_data + i*rowsize, src_data + (index_data[i]-1)*rowsize, rowsize*sizeof(real)); } } else if (src->nDimension == 1) { for (i=0; i<numel; i++) THTensor_(set1d)(tensor,i,THTensor_(get1d)(src,index_data[i]-1)); } else { for (i=0; i<numel; i++) { tSlice = THTensor_(new)(); sSlice = THTensor_(new)(); THTensor_(select)(tSlice, tensor, dim, i); THTensor_(select)(sSlice, src, dim, index_data[i]-1); THTensor_(copy)(tSlice, sSlice); THTensor_(free)(tSlice); THTensor_(free)(sSlice); } } THLongTensor_free(index); } void THTensor_(indexCopy)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src) { long i, numel; THTensor *tSlice, *sSlice; long *index_data; numel = THLongTensor_nElement(index); THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector"); THArgCheck(dim < src->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim+1); THArgCheck(numel == src->size[dim],4,"Number of indices should be equal to source:size(dim)"); index = THLongTensor_newContiguous(index); index_data = THLongTensor_data(index); if (tensor->nDimension > 1 ) { tSlice = THTensor_(new)(); sSlice = THTensor_(new)(); for (i=0; i<numel; i++) { THTensor_(select)(tSlice, tensor, dim, index_data[i]-1); THTensor_(select)(sSlice, src, dim, i); THTensor_(copy)(tSlice, sSlice); } THTensor_(free)(tSlice); THTensor_(free)(sSlice); } else { for (i=0; i<numel; i++) { THTensor_(set1d)(tensor,index_data[i]-1,THTensor_(get1d)(src,i)); } } THLongTensor_free(index); } void THTensor_(indexFill)(THTensor *tensor, int dim, THLongTensor *index, real val) { long i, numel; THTensor *tSlice; long *index_data; numel = THLongTensor_nElement(index); THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector"); THArgCheck(dim < tensor->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim+1); index = THLongTensor_newContiguous(index); index_data = THLongTensor_data(index); for (i=0; i<numel; i++) { if (tensor->nDimension > 1 ) { tSlice = THTensor_(new)(); THTensor_(select)(tSlice, tensor,dim,index_data[i]-1); THTensor_(fill)(tSlice, val); THTensor_(free)(tSlice); } else { THTensor_(set1d)(tensor,index_data[i]-1,val); } } THLongTensor_free(index); } void THTensor_(gather)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index) { long elems_per_row, i, idx; THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 2, "Input tensor must have same dimensions as output tensor"); THArgCheck(dim < THTensor_(nDimension)(tensor), 3, "Index dimension is out of bounds"); THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(src), 4, "Index tensor must have same dimensions as input tensor"); elems_per_row = THLongTensor_size(index, dim); TH_TENSOR_DIM_APPLY3(real, tensor, real, src, long, index, dim, for (i = 0; i < elems_per_row; ++i) { idx = *(index_data + i*index_stride); if (idx < 1 || idx > src_size) THError("Invalid index in gather"); *(tensor_data + i*tensor_stride) = src_data[(idx - 1) * src_stride]; }) } void THTensor_(scatter)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src) { long elems_per_row, i, idx; THArgCheck(dim < 
THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds"); THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3, "Index tensor must have same dimensions as output tensor"); THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 4, "Input tensor must have same dimensions as output tensor"); elems_per_row = THLongTensor_size(index, dim); TH_TENSOR_DIM_APPLY3(real, tensor, real, src, long, index, dim, for (i = 0; i < elems_per_row; ++i) { idx = *(index_data + i*index_stride); if (idx < 1 || idx > tensor_size) THError("Invalid index in scatter"); tensor_data[(idx - 1) * tensor_stride] = *(src_data + i*src_stride); }) } void THTensor_(scatterFill)(THTensor *tensor, int dim, THLongTensor *index, real val) { long elems_per_row, i, idx; THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds"); THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3, "Index tensor must have same dimensions as output tensor"); elems_per_row = THLongTensor_size(index, dim); TH_TENSOR_DIM_APPLY2(real, tensor, long, index, dim, for (i = 0; i < elems_per_row; ++i) { idx = *(index_data + i*index_stride); if (idx < 1 || idx > tensor_size) THError("Invalid index in scatter"); tensor_data[(idx - 1) * tensor_stride] = val; }) } accreal THTensor_(dot)(THTensor *tensor, THTensor *src) { accreal sum = 0; /* we use a trick here. careful with that. */ TH_TENSOR_APPLY2(real, tensor, real, src, long sz = (tensor_size-tensor_i < src_size-src_i ? tensor_size-tensor_i : src_size-src_i); sum += THBlas_(dot)(sz, src_data, src_stride, tensor_data, tensor_stride); tensor_i += sz; src_i += sz; tensor_data += sz*tensor_stride; src_data += sz*src_stride; break;); return sum; } real THTensor_(minall)(THTensor *tensor) { real theMin; real value; THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension"); theMin = THTensor_(data)(tensor)[0]; TH_TENSOR_APPLY(real, tensor, value = *tensor_data; /* This is not the same as value<theMin in the case of NaNs */ if(!(value >= theMin)) { theMin = value; if (isnan(value)) break; }); return theMin; } real THTensor_(maxall)(THTensor *tensor) { real theMax; real value; THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension"); theMax = THTensor_(data)(tensor)[0]; TH_TENSOR_APPLY(real, tensor, value = *tensor_data; /* This is not the same as value>theMax in the case of NaNs */ if(!(value <= theMax)) { theMax = value; if (isnan(value)) break; }); return theMax; } accreal THTensor_(sumall)(THTensor *tensor) { accreal sum = 0; TH_TENSOR_APPLY(real, tensor, sum += *tensor_data;); return sum; } accreal THTensor_(prodall)(THTensor *tensor) { accreal prod = 1; TH_TENSOR_APPLY(real, tensor, prod *= *tensor_data;); return prod; } void THTensor_(add)(THTensor *r_, THTensor *t, real value) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) { real *tp = THTensor_(data)(t); real *rp = THTensor_(data)(r_); long sz = THTensor_(nElement)(t); long i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) rp[i] = tp[i] + value; } else { TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data + value;); } } void THTensor_(mul)(THTensor *r_, THTensor *t, real value) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) { real *tp = THTensor_(data)(t); real *rp = 
THTensor_(data)(r_); long sz = THTensor_(nElement)(t); long i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) rp[i] = tp[i] * value; } else { TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data * value;); } } void THTensor_(div)(THTensor *r_, THTensor *t, real value) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) { real *tp = THTensor_(data)(t); real *rp = THTensor_(data)(r_); long sz = THTensor_(nElement)(t); long i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) rp[i] = tp[i] / value; } else { TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data / value;); } } void THTensor_(clamp)(THTensor *r_, THTensor *t, real min_value, real max_value) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) { real *tp = THTensor_(data)(t); real *rp = THTensor_(data)(r_); real t_val; long sz = THTensor_(nElement)(t); long i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) rp[i] = (tp[i] < min_value) ? min_value : (tp[i] > max_value ? max_value : tp[i]); } else { TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (*t_data < min_value) ? min_value : (*t_data > max_value ? max_value : *t_data);); } } void THTensor_(cadd)(THTensor *r_, THTensor *t, real value, THTensor *src) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { if(r_ == t) { THBlas_(axpy)(THTensor_(nElement)(t), value, THTensor_(data)(src), 1, THTensor_(data)(r_), 1); } else { real *tp = THTensor_(data)(t); real *sp = THTensor_(data)(src); real *rp = THTensor_(data)(r_); long sz = THTensor_(nElement)(t); long i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i< sz; i++) rp[i] = tp[i] + value * sp[i]; } } else { TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data + value * *src_data;); } } void THTensor_(cmul)(THTensor *r_, THTensor *t, THTensor *src) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { real *tp = THTensor_(data)(t); real *sp = THTensor_(data)(src); real *rp = THTensor_(data)(r_); long sz = THTensor_(nElement)(t); long i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) rp[i] = tp[i] * sp[i]; } else { TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * *src_data;); } } void THTensor_(cpow)(THTensor *r_, THTensor *t, THTensor *src) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { real *tp = THTensor_(data)(t); real *sp = THTensor_(data)(src); real *rp = THTensor_(data)(r_); long sz = THTensor_(nElement)(t); long i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) rp[i] = pow(tp[i], sp[i]); } else { TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = pow(*t_data, *src_data);); } } void THTensor_(cdiv)(THTensor *r_, THTensor *t, THTensor *src) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && 
THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { real *tp = THTensor_(data)(t); real *sp = THTensor_(data)(src); real *rp = THTensor_(data)(r_); long sz = THTensor_(nElement)(t); long i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) rp[i] = tp[i] / sp[i]; } else { TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / *src_data;); } } void THTensor_(tpow)(THTensor *r_, real value, THTensor *t) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) { real *tp = THTensor_(data)(t); real *rp = THTensor_(data)(r_); long sz = THTensor_(nElement)(t); long i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) rp[i] = pow(value, tp[i]); } else { TH_TENSOR_APPLY2(real, r_, real, t, *r__data = pow(value, *t_data);); } } void THTensor_(addcmul)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2) { if(r_ != t) { THTensor_(resizeAs)(r_, t); THTensor_(copy)(r_, t); } TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data * *src2_data;); } void THTensor_(addcdiv)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2) { if(r_ != t) { THTensor_(resizeAs)(r_, t); THTensor_(copy)(r_, t); } TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data / *src2_data;); } void THTensor_(addmv)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *mat, THTensor *vec) { if( (mat->nDimension != 2) || (vec->nDimension != 1) ) THError("matrix and vector expected, got %dD, %dD", mat->nDimension, vec->nDimension); if( mat->size[1] != vec->size[0] ) { THDescBuff bm = THTensor_(sizeDesc)(mat); THDescBuff bv = THTensor_(sizeDesc)(vec); THError("size mismatch, %s, %s", bm.str, bv.str); } if(t->nDimension != 1) THError("vector expected, got t: %dD", t->nDimension); if(t->size[0] != mat->size[0]) { THDescBuff bt = THTensor_(sizeDesc)(t); THDescBuff bm = THTensor_(sizeDesc)(mat); THError("size mismatch, t: %s, mat: %s", bt.str, bm.str); } if(r_ != t) { THTensor_(resizeAs)(r_, t); THTensor_(copy)(r_, t); } if(mat->stride[0] == 1) { THBlas_(gemv)('n', mat->size[0], mat->size[1], alpha, THTensor_(data)(mat), mat->stride[1], THTensor_(data)(vec), vec->stride[0], beta, THTensor_(data)(r_), r_->stride[0]); } else if(mat->stride[1] == 1) { THBlas_(gemv)('t', mat->size[1], mat->size[0], alpha, THTensor_(data)(mat), mat->stride[0], THTensor_(data)(vec), vec->stride[0], beta, THTensor_(data)(r_), r_->stride[0]); } else { THTensor *cmat = THTensor_(newContiguous)(mat); THBlas_(gemv)('t', mat->size[1], mat->size[0], alpha, THTensor_(data)(cmat), cmat->stride[0], THTensor_(data)(vec), vec->stride[0], beta, THTensor_(data)(r_), r_->stride[0]); THTensor_(free)(cmat); } } void THTensor_(match)(THTensor *r_, THTensor *m1, THTensor *m2, real gain) { long N1 = m1->size[0]; long N2 = m2->size[0]; long dim; real *m1_p; real *m2_p; real *r_p; long i; THTensor_(resize2d)(r_, N1, N2); m1 = THTensor_(newContiguous)(m1); m2 = THTensor_(newContiguous)(m2); THTensor_(resize2d)(m1, N1, THTensor_(nElement)(m1) / N1); THTensor_(resize2d)(m2, N2, THTensor_(nElement)(m2) / N2); dim = m1->size[1]; THArgCheck(m1->size[1] == m2->size[1], 3, "m1 and m2 must have the same inner vector dim"); m1_p = THTensor_(data)(m1); m2_p = THTensor_(data)(m2); r_p = THTensor_(data)(r_); #pragma omp parallel for private(i) for (i=0; i<N1; i++) { long j,k; for (j=0; j<N2; j++) { real sum = 0; for 
(k=0; k<dim; k++) { real term = m1_p[ i*dim + k ] - m2_p[ j*dim + k ]; sum += term*term; } r_p[ i*N2 + j ] = gain * sum; } } THTensor_(free)(m1); THTensor_(free)(m2); } void THTensor_(addmm)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *m1, THTensor *m2) { char transpose_r, transpose_m1, transpose_m2; THTensor *r__, *m1_, *m2_; if( (m1->nDimension != 2) || (m2->nDimension != 2)) THError("matrices expected, got %dD, %dD tensors", m1->nDimension, m2->nDimension); if(m1->size[1] != m2->size[0]) { THDescBuff bm1 = THTensor_(sizeDesc)(m1); THDescBuff bm2 = THTensor_(sizeDesc)(m2); THError("size mismatch, m1: %s, m2: %s", bm1.str, bm2.str); } if( t->nDimension != 2 ) THError("matrix expected, got %dD tensor for t", t->nDimension); if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) ) { THDescBuff bt = THTensor_(sizeDesc)(t); THDescBuff bm1 = THTensor_(sizeDesc)(m1); THDescBuff bm2 = THTensor_(sizeDesc)(m2); THError("size mismatch, t: %s, m1: %s, m2: %s", bt.str, bm1.str, bm2.str); } if(t != r_) { THTensor_(resizeAs)(r_, t); THTensor_(copy)(r_, t); } /* printf("%ldx%ld = %ldx%ld X %ldx%ld\n", r_->size[0], r_->size[1], m1->size[0], m1->size[1], m2->size[0], m2->size[1]); */ /* r_ */ if(r_->stride[0] == 1 && r_->stride[1] != 0) { transpose_r = 'n'; r__ = r_; } else if(r_->stride[1] == 1 && r_->stride[0] != 0) { THTensor *swap = m2; m2 = m1; m1 = swap; transpose_r = 't'; r__ = r_; } else { transpose_r = 'n'; r__ = THTensor_(newWithSize2d)(r_->size[1], r_->size[0]); THTensor_(copy)(r__, r_); THTensor_(transpose)(r__, NULL, 0, 1); } /* m1 */ if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && m1->stride[(transpose_r == 'n' ? 1 : 0)] != 0) { transpose_m1 = 'n'; m1_ = m1; } else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && m1->stride[(transpose_r == 'n' ? 0 : 1)] != 0) { transpose_m1 = 't'; m1_ = m1; } else { transpose_m1 = (transpose_r == 'n' ? 't' : 'n'); m1_ = THTensor_(newContiguous)(m1); } /* m2 */ if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && m2->stride[(transpose_r == 'n' ? 1 : 0)] != 0) { transpose_m2 = 'n'; m2_ = m2; } else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && m2->stride[(transpose_r == 'n' ? 0 : 1)] != 0) { transpose_m2 = 't'; m2_ = m2; } else { transpose_m2 = (transpose_r == 'n' ? 't' : 'n'); m2_ = THTensor_(newContiguous)(m2); } /* do the operation */ THBlas_(gemm)(transpose_m1, transpose_m2, r__->size[(transpose_r == 'n' ? 0 : 1)], r__->size[(transpose_r == 'n' ? 1 : 0)], m1_->size[(transpose_r == 'n' ? 1 : 0)], alpha, THTensor_(data)(m1_), (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]), THTensor_(data)(m2_), (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]), beta, THTensor_(data)(r__), r__->stride[(transpose_r == 'n' ? 
1 : 0)]); /* free intermediate variables */ if(m1_ != m1) THTensor_(free)(m1_); if(m2_ != m2) THTensor_(free)(m2_); if(r__ != r_) THTensor_(freeCopyTo)(r__, r_); } void THTensor_(addr)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *vec1, THTensor *vec2) { if( (vec1->nDimension != 1) || (vec2->nDimension != 1) ) THError("vector and vector expected, got %dD, %dD tensors", vec1->nDimension, vec2->nDimension); if(t->nDimension != 2) THError("expected matrix, got %dD tensor for t", t->nDimension); if( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) ) { THDescBuff bt = THTensor_(sizeDesc)(t); THDescBuff bv1 = THTensor_(sizeDesc)(vec1); THDescBuff bv2 = THTensor_(sizeDesc)(vec2); THError("size mismatch, t: %s, vec1: %s, vec2: %s", bt.str, bv1.str, bv2.str); } if(r_ != t) { THTensor_(resizeAs)(r_, t); THTensor_(copy)(r_, t); } if(beta != 1) THTensor_(mul)(r_, r_, beta); if(r_->stride[0] == 1) { THBlas_(ger)(vec1->size[0], vec2->size[0], alpha, THTensor_(data)(vec1), vec1->stride[0], THTensor_(data)(vec2), vec2->stride[0], THTensor_(data)(r_), r_->stride[1]); } else if(r_->stride[1] == 1) { THBlas_(ger)(vec2->size[0], vec1->size[0], alpha, THTensor_(data)(vec2), vec2->stride[0], THTensor_(data)(vec1), vec1->stride[0], THTensor_(data)(r_), r_->stride[0]); } else { THTensor *cr = THTensor_(newClone)(r_); THBlas_(ger)(vec2->size[0], vec1->size[0], alpha, THTensor_(data)(vec2), vec2->stride[0], THTensor_(data)(vec1), vec1->stride[0], THTensor_(data)(cr), cr->stride[0]); THTensor_(freeCopyTo)(cr, r_); } } void THTensor_(addbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2) { long batch; THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor"); THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor"); THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2, "equal number of batches expected, got %d, %d", THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0)); THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2, "wrong matrix size, batch1: %dx%d, batch2: %dx%d", THTensor_(size)(batch1, 1), THTensor_(size)(batch1,2), THTensor_(size)(batch2, 1), THTensor_(size)(batch2,2)); long dim1 = THTensor_(size)(batch1, 1); long dim2 = THTensor_(size)(batch2, 2); THArgCheck(THTensor_(size)(t, 0) == dim1, 1, "output tensor of incorrect size"); THArgCheck(THTensor_(size)(t, 1) == dim2, 1, "output tensor of incorrect size"); if (t != result) { THTensor_(resizeAs)(result, t); THTensor_(copy)(result, t); } THTensor *matrix1 = THTensor_(new)(); THTensor *matrix2 = THTensor_(new)(); for (batch = 0; batch < THTensor_(size)(batch1, 0); ++batch) { THTensor_(select)(matrix1, batch1, 0, batch); THTensor_(select)(matrix2, batch2, 0, batch); THTensor_(addmm)(result, beta, result, alpha, matrix1, matrix2); beta = 1; // accumulate output once } THTensor_(free)(matrix1); THTensor_(free)(matrix2); } void THTensor_(baddbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2) { long batch; THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor, got %dD", THTensor_(nDimension)(batch1)); THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor, got %dD", THTensor_(nDimension)(batch2)); THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2, "equal number of batches expected, got %d, %d", THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0)); THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2, "wrong 
matrix size, batch1: %dx%d, batch2: %dx%d", THTensor_(size)(batch1, 1), THTensor_(size)(batch1, 2), THTensor_(size)(batch2, 1), THTensor_(size)(batch2, 2)); long bs = THTensor_(size)(batch1, 0); long dim1 = THTensor_(size)(batch1, 1); long dim2 = THTensor_(size)(batch2, 2); THArgCheck(THTensor_(size)(t, 0) == bs, 1, "output tensor of incorrect size"); THArgCheck(THTensor_(size)(t, 1) == dim1, 1, "output tensor of incorrect size"); THArgCheck(THTensor_(size)(t, 2) == dim2, 1, "output tensor of incorrect size"); if (t != result) { THTensor_(resizeAs)(result, t); THTensor_(copy)(result, t); } THTensor *matrix1 = THTensor_(new)(); THTensor *matrix2 = THTensor_(new)(); THTensor *result_matrix = THTensor_(new)(); for (batch = 0; batch < THTensor_(size)(batch1, 0); ++batch) { THTensor_(select)(matrix1, batch1, 0, batch); THTensor_(select)(matrix2, batch2, 0, batch); THTensor_(select)(result_matrix, result, 0, batch); THTensor_(addmm)(result_matrix, beta, result_matrix, alpha, matrix1, matrix2); } THTensor_(free)(matrix1); THTensor_(free)(matrix2); THTensor_(free)(result_matrix); } long THTensor_(numel)(THTensor *t) { return THTensor_(nElement)(t); } void THTensor_(max)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension) { THLongStorage *dim; real theMax; real value; long theIndex; long i; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension+1); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(values_, dim, NULL); THLongTensor_resize(indices_, dim, NULL); THLongStorage_free(dim); TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension, theMax = t_data[0]; theIndex = 0; for(i = 0; i < t_size; i++) { value = t_data[i*t_stride]; /* This is not the same as value>theMax in the case of NaNs */ if(!(value <= theMax)) { theIndex = i; theMax = value; if (isnan(value)) break; } } *indices__data = theIndex; *values__data = theMax;); } void THTensor_(min)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension) { THLongStorage *dim; real theMin; real value; long theIndex; long i; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension+1); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(values_, dim, NULL); THLongTensor_resize(indices_, dim, NULL); THLongStorage_free(dim); TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension, theMin = t_data[0]; theIndex = 0; for(i = 0; i < t_size; i++) { value = t_data[i*t_stride]; /* This is not the same as value<theMin in the case of NaNs */ if(!(value >= theMin)) { theIndex = i; theMin = value; if (isnan(value)) break; } } *indices__data = theIndex; *values__data = theMin;); } void THTensor_(sum)(THTensor *r_, THTensor *t, int dimension) { THLongStorage *dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension+1); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(r_, dim, NULL); THLongStorage_free(dim); TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, accreal sum = 0; long i; for(i = 0; i < t_size; i++) sum += t_data[i*t_stride]; *r__data = (real)sum;); } void THTensor_(prod)(THTensor *r_, THTensor *t, int dimension) { THLongStorage *dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension+1); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); 
THTensor_(resize)(r_, dim, NULL); THLongStorage_free(dim); TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, accreal prod = 1; long i; for(i = 0; i < t_size; i++) prod *= t_data[i*t_stride]; *r__data = (real)prod;); } void THTensor_(cumsum)(THTensor *r_, THTensor *t, int dimension) { THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension+1); THTensor_(resizeAs)(r_, t); TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, accreal cumsum = 0; long i; for(i = 0; i < t_size; i++) { cumsum += t_data[i*t_stride]; r__data[i*r__stride] = (real)cumsum; }); } void THTensor_(cumprod)(THTensor *r_, THTensor *t, int dimension) { THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension+1); THTensor_(resizeAs)(r_, t); TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, accreal cumprod = 1; long i; for(i = 0; i < t_size; i++) { cumprod *= t_data[i*t_stride]; r__data[i*r__stride] = (real)cumprod; }); } void THTensor_(sign)(THTensor *r_, THTensor *t) { THTensor_(resizeAs)(r_, t); #if defined (TH_REAL_IS_BYTE) TH_TENSOR_APPLY2(real, r_, real, t, if (*t_data > 0) *r__data = 1; else *r__data = 0;); #else TH_TENSOR_APPLY2(real, r_, real, t, if (*t_data > 0) *r__data = 1; else if (*t_data < 0) *r__data = -1; else *r__data = 0;); #endif } accreal THTensor_(trace)(THTensor *t) { real *t_data = THTensor_(data)(t); accreal sum = 0; long i = 0; long t_stride_0, t_stride_1, t_diag_size; THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix"); t_stride_0 = THTensor_(stride)(t, 0); t_stride_1 = THTensor_(stride)(t, 1); t_diag_size = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1)); while(i < t_diag_size) { sum += t_data[i*(t_stride_0+t_stride_1)]; i++; } return sum; } void THTensor_(cross)(THTensor *r_, THTensor *a, THTensor *b, int dimension) { int i; if(THTensor_(nDimension)(a) != THTensor_(nDimension)(b)) THError("inconsistent tensor dimension %dD, %dD", THTensor_(nDimension)(a), THTensor_(nDimension)(b)); for(i = 0; i < THTensor_(nDimension)(a); i++) { if(THTensor_(size)(a, i) != THTensor_(size)(b, i)) { THDescBuff ba = THTensor_(sizeDesc)(a); THDescBuff bb = THTensor_(sizeDesc)(b); THError("inconsistent tensor sizes %s, %s", ba.str, bb.str); } } if(dimension < 0) { for(i = 0; i < THTensor_(nDimension)(a); i++) { if(THTensor_(size)(a, i) == 3) { dimension = i; break; } } if(dimension < 0) { THDescBuff ba = THTensor_(sizeDesc)(a); THError("no dimension of size 3 in a: %s", ba.str); } } THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(a), 3, "dimension %d out of range", dimension+1); THArgCheck(THTensor_(size)(a, dimension) == 3, 3, "dimension %d does not have size 3", dimension+1); THTensor_(resizeAs)(r_, a); TH_TENSOR_DIM_APPLY3(real, a, real, b, real, r_, dimension, r__data[0*r__stride] = a_data[1*a_stride]*b_data[2*b_stride] - a_data[2*a_stride]*b_data[1*b_stride]; r__data[1*r__stride] = a_data[2*a_stride]*b_data[0*b_stride] - a_data[0*a_stride]*b_data[2*b_stride]; r__data[2*r__stride] = a_data[0*a_stride]*b_data[1*b_stride] - a_data[1*a_stride]*b_data[0*b_stride];); } void THTensor_(cmax)(THTensor *r, THTensor *t, THTensor *src) { THTensor_(resizeAs)(r, t); TH_TENSOR_APPLY3(real, r, real, t, real, src, *r_data = *t_data > *src_data ? *t_data : *src_data;); } void THTensor_(cmin)(THTensor *r, THTensor *t, THTensor *src) { THTensor_(resizeAs)(r, t); TH_TENSOR_APPLY3(real, r, real, t, real, src, *r_data = *t_data < *src_data ? 
*t_data : *src_data;); } void THTensor_(cmaxValue)(THTensor *r, THTensor *t, real value) { THTensor_(resizeAs)(r, t); TH_TENSOR_APPLY2(real, r, real, t, *r_data = *t_data > value ? *t_data : value;); } void THTensor_(cminValue)(THTensor *r, THTensor *t, real value) { THTensor_(resizeAs)(r, t); TH_TENSOR_APPLY2(real, r, real, t, *r_data = *t_data < value ? *t_data : value;); } void THTensor_(zeros)(THTensor *r_, THLongStorage *size) { THTensor_(resize)(r_, size, NULL); THTensor_(zero)(r_); } void THTensor_(ones)(THTensor *r_, THLongStorage *size) { THTensor_(resize)(r_, size, NULL); THTensor_(fill)(r_, 1); } void THTensor_(diag)(THTensor *r_, THTensor *t, int k) { THArgCheck(THTensor_(nDimension)(t) == 1 || THTensor_(nDimension)(t) == 2, 1, "matrix or a vector expected"); if(THTensor_(nDimension)(t) == 1) { real *t_data = THTensor_(data)(t); long t_stride_0 = THTensor_(stride)(t, 0); long t_size = THTensor_(size)(t, 0); long sz = t_size + (k >= 0 ? k : -k); real *r__data; long r__stride_0; long r__stride_1; long i; THTensor_(resize2d)(r_, sz, sz); THTensor_(zero)(r_); r__data = THTensor_(data)(r_); r__stride_0 = THTensor_(stride)(r_, 0); r__stride_1 = THTensor_(stride)(r_, 1); r__data += (k >= 0 ? k*r__stride_1 : -k*r__stride_0); for(i = 0; i < t_size; i++) r__data[i*(r__stride_0+r__stride_1)] = t_data[i*t_stride_0]; } else { real *t_data = THTensor_(data)(t); long t_stride_0 = THTensor_(stride)(t, 0); long t_stride_1 = THTensor_(stride)(t, 1); long sz; real *r__data; long r__stride_0; long i; if(k >= 0) sz = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1)-k); else sz = THMin(THTensor_(size)(t, 0)+k, THTensor_(size)(t, 1)); THTensor_(resize1d)(r_, sz); r__data = THTensor_(data)(r_); r__stride_0 = THTensor_(stride)(r_, 0); t_data += (k >= 0 ? k*t_stride_1 : -k*t_stride_0); for(i = 0; i < sz; i++) r__data[i*r__stride_0] = t_data[i*(t_stride_0+t_stride_1)]; } } void THTensor_(eye)(THTensor *r_, long n, long m) { real *r__data; long i, sz; THArgCheck(n > 0, 1, "invalid argument"); if(m <= 0) m = n; THTensor_(resize2d)(r_, n, m); THTensor_(zero)(r_); i = 0; r__data = THTensor_(data)(r_); sz = THMin(THTensor_(size)(r_, 0), THTensor_(size)(r_, 1)); for(i = 0; i < sz; i++) r__data[i*(r_->stride[0]+r_->stride[1])] = 1; } void THTensor_(range)(THTensor *r_, accreal xmin, accreal xmax, accreal step) { long size; real i = 0; THArgCheck(step > 0 || step < 0, 3, "step must be a non-null number"); THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin)) , 2, "upper bound and larger bound incoherent with step sign"); size = (long)((xmax/step - xmin/step)+1); THTensor_(resize1d)(r_, size); TH_TENSOR_APPLY(real, r_, *r__data = xmin + (i++)*step;); } void THTensor_(randperm)(THTensor *r_, THGenerator *_generator, long n) { real *r__data; long r__stride_0; long i; THArgCheck(n > 0, 1, "must be strictly positive"); THTensor_(resize1d)(r_, n); r__data = THTensor_(data)(r_); r__stride_0 = THTensor_(stride)(r_,0); for(i = 0; i < n; i++) r__data[i*r__stride_0] = (real)(i); for(i = 0; i < n-1; i++) { long z = THRandom_random(_generator) % (n-i); real sav = r__data[i*r__stride_0]; r__data[i*r__stride_0] = r__data[(z+i)*r__stride_0]; r__data[(z+i)*r__stride_0] = sav; } } void THTensor_(reshape)(THTensor *r_, THTensor *t, THLongStorage *size) { THTensor_(resize)(r_, size, NULL); THTensor_(copy)(r_, t); } /* I cut and pasted (slightly adapted) the quicksort code from Sedgewick's 1978 "Implementing Quicksort Programs" article http://www.csie.ntu.edu.tw/~b93076/p847-sedgewick.pdf It is the state of 
the art existing implementation. The macros are here to make as close a match as possible to the pseudocode of Program 2 p.851 Note that other partition schemes exist, and are typically presented in textbook, but those are less efficient. See e.g. http://cs.stackexchange.com/questions/11458/quicksort-partitioning-hoare-vs-lomuto Julien, November 12th 2013 */ #define MAX_LEVELS 300 #define M_SMALL 10 /* Limit for small subfiles */ #define ARR(III) arr[(III)*stride] #define IDX(III) idx[(III)*stride] #define LONG_SWAP(AAA, BBB) swap = AAA; AAA = BBB; BBB = swap #define REAL_SWAP(AAA, BBB) rswap = AAA; AAA = BBB; BBB = rswap #define BOTH_SWAP(III, JJJ) \ REAL_SWAP(ARR(III), ARR(JJJ)); \ LONG_SWAP(IDX(III), IDX(JJJ)) static void THTensor_(quicksortascend)(real *arr, long *idx, long elements, long stride) { long beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left; real rswap, piv; unsigned char done = 0; /* beg[0]=0; end[0]=elements; */ stack = 0; L = 0; R = elements-1; done = elements-1 <= M_SMALL; while(!done) { /* Use median of three for pivot choice */ P=(L+R)>>1; BOTH_SWAP(P, L+1); if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); } if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); } if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); } i = L+1; j = R; piv = ARR(L); pid = IDX(L); do { do { i = i+1; } while(ARR(i) < piv); do { j = j-1; } while(ARR(j) > piv); if (j < i) break; BOTH_SWAP(i, j); } while(1); BOTH_SWAP(L, j); /* Left subfile is (L, j-1) */ /* Right subfile is (i, R) */ sz_left = j-L; sz_right = R-i+1; if (sz_left <= M_SMALL && sz_right <= M_SMALL) { /* both subfiles are small */ /* if stack empty */ if (stack == 0) { done = 1; } else { stack--; L = beg[stack]; R = end[stack]; } } else if (sz_left <= M_SMALL || sz_right <= M_SMALL) { /* exactly one of the subfiles is small */ /* (L,R) = large subfile */ if (sz_left > sz_right) { /* Implicit: L = L; */ R = j-1; } else { L = i; /* Implicit: R = R; */ } } else { /* none of the subfiles is small */ /* push large subfile */ /* (L,R) = small subfile */ if (sz_left > sz_right) { beg[stack] = L; end[stack] = j-1; stack++; L = i; /* Implicit: R = R */ } else { beg[stack] = i; end[stack] = R; stack++; /* Implicit: L = L; */ R = j-1; } } } /* while not done */ /* Now insertion sort on the concatenation of subfiles */ for(i=elements-2; i>=0; i--) { if (ARR(i) > ARR(i+1)) { piv = ARR(i); pid = IDX(i); j = i+1; do { ARR(j-1) = ARR(j); IDX(j-1) = IDX(j); j = j+1; } while(j < elements && ARR(j) < piv); ARR(j-1) = piv; IDX(j-1) = pid; } } } static void THTensor_(quicksortdescend)(real *arr, long *idx, long elements, long stride) { long beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left; real rswap, piv; unsigned char done = 0; /* beg[0]=0; end[0]=elements; */ stack = 0; L = 0; R = elements-1; done = elements-1 <= M_SMALL; while(!done) { /* Use median of three for pivot choice */ P=(L+R)>>1; BOTH_SWAP(P, L+1); if (ARR(L+1) < ARR(R)) { BOTH_SWAP(L+1, R); } if (ARR(L) < ARR(R)) { BOTH_SWAP(L, R); } if (ARR(L+1) < ARR(L)) { BOTH_SWAP(L+1, L); } i = L+1; j = R; piv = ARR(L); pid = IDX(L); do { do { i = i+1; } while(ARR(i) > piv); do { j = j-1; } while(ARR(j) < piv); if (j < i) break; BOTH_SWAP(i, j); } while(1); BOTH_SWAP(L, j); /* Left subfile is (L, j-1) */ /* Right subfile is (i, R) */ sz_left = j-L; sz_right = R-i+1; if (sz_left <= M_SMALL && sz_right <= M_SMALL) { /* both subfiles are small */ /* if stack empty */ if (stack == 0) { done = 1; } else { stack--; L = beg[stack]; R = end[stack]; } } 
else if (sz_left <= M_SMALL || sz_right <= M_SMALL) { /* exactly one of the subfiles is small */ /* (L,R) = large subfile */ if (sz_left > sz_right) { /* Implicit: L = L; */ R = j-1; } else { L = i; /* Implicit: R = R; */ } } else { /* none of the subfiles is small */ /* push large subfile */ /* (L,R) = small subfile */ if (sz_left > sz_right) { beg[stack] = L; end[stack] = j-1; stack++; L = i; /* Implicit: R = R */ } else { beg[stack] = i; end[stack] = R; stack++; /* Implicit: L = L; */ R = j-1; } } } /* while not done */ /* Now insertion sort on the concatenation of subfiles */ for(i=elements-2; i>=0; i--) { if (ARR(i) < ARR(i+1)) { piv = ARR(i); pid = IDX(i); j = i+1; do { ARR(j-1) = ARR(j); IDX(j-1) = IDX(j); j = j+1; } while(j < elements && ARR(j) > piv); ARR(j-1) = piv; IDX(j-1) = pid; } } } #undef MAX_LEVELS #undef M_SMALL void THTensor_(sort)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int dimension, int descendingOrder) { THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d", dimension+1); THTensor_(resizeAs)(rt_, t); THTensor_(copy)(rt_, t); { THLongStorage *size = THTensor_(newSizeOf)(t); THLongTensor_resize(ri_, size, NULL); THLongStorage_free(size); } if(descendingOrder) { TH_TENSOR_DIM_APPLY2(real, rt_, long, ri_, dimension, long i; for(i = 0; i < ri__size; i++) ri__data[i*ri__stride] = i; THTensor_(quicksortdescend)(rt__data, ri__data, rt__size, rt__stride);) } else { TH_TENSOR_DIM_APPLY2(real, rt_, long, ri_, dimension, long i; for(i = 0; i < ri__size; i++) ri__data[i*ri__stride] = i; THTensor_(quicksortascend)(rt__data, ri__data, rt__size, rt__stride);) } } /* Implementation of the Quickselect algorithm, based on Nicolas Devillard's public domain implementation at http://ndevilla.free.fr/median/median/ Adapted similarly to the above Quicksort algorithm. 
*/ static void THTensor_(quickselect)(real *arr, long *idx, long k, long elements, long stride) { long P, L, R, i, j, swap, pid; real rswap, piv; L = 0; R = elements-1; do { if (R <= L) /* One element only */ return; if (R == L+1) { /* Two elements only */ if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); } return; } /* Use median of three for pivot choice */ P=(L+R)>>1; BOTH_SWAP(P, L+1); if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); } if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); } if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); } i = L+1; j = R; piv = ARR(L); pid = IDX(L); do { do i++; while(ARR(i) < piv); do j--; while(ARR(j) > piv); if (j < i) break; BOTH_SWAP(i, j); } while(1); BOTH_SWAP(L, j); /* Re-set active partition */ if (j <= k) L=i; if (j >= k) R=j-1; } while(1); } #undef ARR #undef IDX #undef LONG_SWAP #undef REAL_SWAP #undef BOTH_SWAP void THTensor_(kthvalue)(THTensor *values_, THLongTensor *indices_, THTensor *t, long k, int dimension) { THLongStorage *dim; THTensor *temp_; THLongTensor *tempi_; real *temp__data; long *tempi__data; long t_size_dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range"); THArgCheck(k >= 0 && k < t->size[dimension], 2, "selected index out of range"); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(values_, dim, NULL); THLongTensor_resize(indices_, dim, NULL); THLongStorage_free(dim); t_size_dim = THTensor_(size)(t, dimension); temp_ = THTensor_(new)(); THTensor_(resize1d)(temp_, t_size_dim); temp__data = THTensor_(data)(temp_); tempi_ = THLongTensor_new(); THLongTensor_resize1d(tempi_, t_size_dim); tempi__data = THLongTensor_data(tempi_); TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension, long i; for(i = 0; i < t_size_dim; i++) temp__data[i] = t_data[i*t_stride]; for(i = 0; i < t_size_dim; i++) tempi__data[i] = i; THTensor_(quickselect)(temp__data, tempi__data, k, t_size_dim, 1); *values__data = temp__data[k]; *indices__data = tempi__data[k];); THTensor_(free)(temp_); THLongTensor_free(tempi_); } void THTensor_(median)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension) { long t_size_dim, k; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range"); t_size_dim = THTensor_(size)(t, dimension); k = (t_size_dim-1) >> 1; /* take middle or one-before-middle element */ THTensor_(kthvalue)(values_, indices_, t, k, dimension); } void THTensor_(tril)(THTensor *r_, THTensor *t, long k) { long t_size_0, t_size_1; long t_stride_0, t_stride_1; long r__stride_0, r__stride_1; real *t_data, *r__data; long r, c; THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix"); THTensor_(resizeAs)(r_, t); t_size_0 = THTensor_(size)(t, 0); t_size_1 = THTensor_(size)(t, 1); t_stride_0 = THTensor_(stride)(t, 0); t_stride_1 = THTensor_(stride)(t, 1); r__stride_0 = THTensor_(stride)(r_, 0); r__stride_1 = THTensor_(stride)(r_, 1); r__data = THTensor_(data)(r_); t_data = THTensor_(data)(t); for(r = 0; r < t_size_0; r++) { long sz = THMin(r+k+1, t_size_1); for(c = THMax(0, r+k); c < t_size_1; c++) r__data[r*r__stride_0+c*r__stride_1] = 0; for(c = 0; c < sz; c++) r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1]; } } void THTensor_(triu)(THTensor *r_, THTensor *t, long k) { long t_size_0, t_size_1; long t_stride_0, t_stride_1; long r__stride_0, r__stride_1; real *t_data, *r__data; long r, c; THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix"); THTensor_(resizeAs)(r_, t); t_size_0 = 
THTensor_(size)(t, 0); t_size_1 = THTensor_(size)(t, 1); t_stride_0 = THTensor_(stride)(t, 0); t_stride_1 = THTensor_(stride)(t, 1); r__stride_0 = THTensor_(stride)(r_, 0); r__stride_1 = THTensor_(stride)(r_, 1); r__data = THTensor_(data)(r_); t_data = THTensor_(data)(t); for(r = 0; r < t_size_0; r++) { long sz = THMin(r+k, t_size_1); for(c = THMax(0, r+k); c < t_size_1; c++) r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1]; for(c = 0; c < sz; c++) r__data[r*r__stride_0+c*r__stride_1] = 0; } } void THTensor_(cat)(THTensor *r_, THTensor *ta, THTensor *tb, int dimension) { THLongStorage *size; int i; int ndim = THMax(ta->nDimension, tb->nDimension); ndim = THMax(ndim, dimension+1); THArgCheck(dimension >= 0, 4, "invalid dimension %d", dimension+1); size = THLongStorage_newWithSize(ndim); for(i = 0; i < ndim; i++) { long tadi = (i < ta->nDimension ? ta->size[i] : 1); long tbdi = (i < tb->nDimension ? tb->size[i] : 1); if(i == dimension) size->data[i] = tadi+tbdi; else { if(tadi != tbdi) { THLongStorage_free(size); THError("inconsistent tensor sizes"); } size->data[i] = tadi; } } THTensor_(resize)(r_, size, NULL); THLongStorage_free(size); { THTensor *nta = THTensor_(newWithTensor)(r_); THTensor_(narrow)(nta, NULL, dimension, 0, (dimension < ta->nDimension ? ta->size[dimension] : 1)); THTensor_(copy)(nta, ta); THTensor_(free)(nta); } { THTensor *ntb = THTensor_(newWithTensor)(r_); THTensor_(narrow)(ntb, NULL, dimension, (dimension < ta->nDimension ? ta->size[dimension] : 1), (dimension < tb->nDimension ? tb->size[dimension] : 1)); THTensor_(copy)(ntb, tb); THTensor_(free)(ntb); } } #define TENSOR_IMPLEMENT_LOGICAL(NAME,OP) \ void THTensor_(NAME##Value)(THByteTensor *r_, THTensor* t, real value) \ { \ THByteTensor_rawResize(r_, t->nDimension, t->size, NULL); \ THByteTensor_zero(r_); \ TH_TENSOR_APPLY2(unsigned char, r_, real, t, \ if (*t_data OP value) *r__data = 1;); \ } \ void THTensor_(NAME##ValueT)(THTensor* r_, THTensor* t, real value) \ { \ THTensor_(rawResize)(r_, t->nDimension, t->size, NULL); \ THTensor_(zero)(r_); \ TH_TENSOR_APPLY2(real, r_, real, t, \ if (*t_data OP value) *r__data = 1;); \ } \ void THTensor_(NAME##Tensor)(THByteTensor *r_, THTensor *ta, THTensor *tb) \ { \ THByteTensor_rawResize(r_, ta->nDimension, ta->size, NULL); \ THByteTensor_zero(r_); \ TH_TENSOR_APPLY3(unsigned char, r_, real, ta, real, tb, \ if(*ta_data OP *tb_data) *r__data = 1;); \ } \ void THTensor_(NAME##TensorT)(THTensor *r_, THTensor *ta, THTensor *tb) \ { \ THTensor_(rawResize)(r_, ta->nDimension, ta->size, NULL); \ THTensor_(zero)(r_); \ TH_TENSOR_APPLY3(real, r_, real, ta, real, tb, \ if(*ta_data OP *tb_data) *r__data = 1;); \ } \ TENSOR_IMPLEMENT_LOGICAL(lt,<) TENSOR_IMPLEMENT_LOGICAL(gt,>) TENSOR_IMPLEMENT_LOGICAL(le,<=) TENSOR_IMPLEMENT_LOGICAL(ge,>=) TENSOR_IMPLEMENT_LOGICAL(eq,==) TENSOR_IMPLEMENT_LOGICAL(ne,!=) #define LAB_IMPLEMENT_BASIC_FUNCTION(NAME, CFUNC) \ void THTensor_(NAME)(THTensor *r_, THTensor *t) \ { \ THTensor_(resizeAs)(r_, t); \ TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data);); \ } \ #define LAB_IMPLEMENT_BASIC_FUNCTION_VALUE(NAME, CFUNC) \ void THTensor_(NAME)(THTensor *r_, THTensor *t, real value) \ { \ THTensor_(resizeAs)(r_, t); \ TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data, value);); \ } \ #if defined(TH_REAL_IS_LONG) LAB_IMPLEMENT_BASIC_FUNCTION(abs,labs) #endif /* long only part */ #if defined(TH_REAL_IS_INT) LAB_IMPLEMENT_BASIC_FUNCTION(abs,abs) #endif /* int only part */ #if defined(TH_REAL_IS_BYTE) #define 
TENSOR_IMPLEMENT_LOGICAL_SUM(NAME, OP, INIT_VALUE) \ int THTensor_(NAME)(THTensor *tensor) \ { \ THArgCheck(tensor->nDimension > 0, 1, "empty Tensor"); \ int sum = INIT_VALUE; \ TH_TENSOR_APPLY(real, tensor, sum OP *tensor_data;); \ return sum; \ } TENSOR_IMPLEMENT_LOGICAL_SUM(logicalall, &=, 1) TENSOR_IMPLEMENT_LOGICAL_SUM(logicalany, |=, 0) #endif /* Byte only part */ /* floating point only now */ #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) LAB_IMPLEMENT_BASIC_FUNCTION(log,log) LAB_IMPLEMENT_BASIC_FUNCTION(log1p,log1p) LAB_IMPLEMENT_BASIC_FUNCTION(exp,exp) LAB_IMPLEMENT_BASIC_FUNCTION(cos,cos) LAB_IMPLEMENT_BASIC_FUNCTION(acos,acos) LAB_IMPLEMENT_BASIC_FUNCTION(cosh,cosh) LAB_IMPLEMENT_BASIC_FUNCTION(sin,sin) LAB_IMPLEMENT_BASIC_FUNCTION(asin,asin) LAB_IMPLEMENT_BASIC_FUNCTION(sinh,sinh) LAB_IMPLEMENT_BASIC_FUNCTION(tan,tan) LAB_IMPLEMENT_BASIC_FUNCTION(atan,atan) LAB_IMPLEMENT_BASIC_FUNCTION(tanh,tanh) LAB_IMPLEMENT_BASIC_FUNCTION_VALUE(pow,pow) LAB_IMPLEMENT_BASIC_FUNCTION(sqrt,sqrt) LAB_IMPLEMENT_BASIC_FUNCTION(ceil,ceil) LAB_IMPLEMENT_BASIC_FUNCTION(floor,floor) LAB_IMPLEMENT_BASIC_FUNCTION(round,round) LAB_IMPLEMENT_BASIC_FUNCTION(abs,fabs) void THTensor_(atan2)(THTensor *r_, THTensor *tx, THTensor *ty) { THTensor_(resizeAs)(r_, tx); TH_TENSOR_APPLY3(real, r_, real, tx, real, ty, *r__data = atan2(*tx_data,*ty_data);); } void THTensor_(mean)(THTensor *r_, THTensor *t, int dimension) { THLongStorage *dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d", dimension+1); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(r_, dim, NULL); THLongStorage_free(dim); TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, accreal sum = 0; long i; for(i = 0; i < t_size; i++) sum += t_data[i*t_stride]; *r__data = (real)sum/t_size;); } void THTensor_(std)(THTensor *r_, THTensor *t, int dimension, int flag) { THLongStorage *dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d", dimension+1); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(r_, dim, NULL); THLongStorage_free(dim); TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, accreal sum = 0; accreal sum2 = 0; long i; for(i = 0; i < t_size; i++) { real z = t_data[i*t_stride]; sum += z; sum2 += z*z; } if(flag) { sum /= t_size; sum2 /= t_size; sum2 -= sum*sum; sum2 = (sum2 < 0 ? 0 : sum2); *r__data = (real)sqrt(sum2); } else { sum /= t_size; sum2 /= t_size-1; sum2 -= ((real)t_size)/((real)(t_size-1))*sum*sum; sum2 = (sum2 < 0 ? 0 : sum2); *r__data = (real)sqrt(sum2); }); } void THTensor_(var)(THTensor *r_, THTensor *t, int dimension, int flag) { THLongStorage *dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d", dimension+1); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(r_, dim, NULL); THLongStorage_free(dim); TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, accreal sum = 0; accreal sum2 = 0; long i; for(i = 0; i < t_size; i++) { real z = t_data[i*t_stride]; sum += z; sum2 += z*z; } if(flag) { sum /= t_size; sum2 /= t_size; sum2 -= sum*sum; sum2 = (sum2 < 0 ? 0 : sum2); *r__data = sum2; } else { sum /= t_size; sum2 /= t_size-1; sum2 -= ((real)t_size)/((real)(t_size-1))*sum*sum; sum2 = (sum2 < 0 ? 
0 : sum2); *r__data = (real)sum2; }); } void THTensor_(norm)(THTensor *r_, THTensor *t, real value, int dimension) { THLongStorage *dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d", dimension+1); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(r_, dim, NULL); THLongStorage_free(dim); if(value == 0) { TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, accreal sum = 0; long i; for(i = 0; i < t_size; i++) sum += t_data[i*t_stride] != 0.0; *r__data = sum;) } else { TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, accreal sum = 0; long i; for(i = 0; i < t_size; i++) sum += pow(fabs(t_data[i*t_stride]), value); *r__data = pow(sum, 1.0/value);) } } accreal THTensor_(normall)(THTensor *tensor, real value) { accreal sum = 0; if(value == 0) { TH_TENSOR_APPLY(real, tensor, sum += *tensor_data != 0.0;); return sum; } else if(value == 1) { TH_TENSOR_APPLY(real, tensor, sum += fabs(*tensor_data);); return sum; } else if(value == 2) { TH_TENSOR_APPLY(real, tensor, accreal z = *tensor_data; sum += z*z;); return sqrt(sum); } else { TH_TENSOR_APPLY(real, tensor, sum += pow(fabs(*tensor_data), value);); return pow(sum, 1.0/value); } } void THTensor_(renorm)(THTensor *res, THTensor *src, real value, int dimension, real maxnorm) { int i; THTensor *rowR, *rowS; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(src), 3, "invalid dimension %d", dimension+1); THArgCheck(value > 0, 2, "non-positive-norm not supported"); THArgCheck(THTensor_(nDimension)(src) > 1, 1, "need at least 2 dimensions, got %d dimensions", THTensor_(nDimension)(src)); rowR = THTensor_(new)(); rowS = THTensor_(new)(); THTensor_(resizeAs)(res, src); for (i=0; i<src->size[dimension]; i++) { real norm = 0; real new_norm; THTensor_(select)(rowS, src, dimension, i); THTensor_(select)(rowR, res, dimension, i); if (value == 1) { TH_TENSOR_APPLY(real, rowS, norm += fabs(*rowS_data);); } else if (value == 2) { TH_TENSOR_APPLY(real, rowS, accreal z = *rowS_data; norm += z*z;); } else { TH_TENSOR_APPLY(real, rowS, norm += pow(fabs(*rowS_data), value);); } norm = pow(norm, 1/value); if (norm > maxnorm) { new_norm = maxnorm / (norm + 1e-7); TH_TENSOR_APPLY2( real, rowR, real, rowS, *rowR_data = (*rowS_data) * new_norm; ) } else THTensor_(copy)(rowR, rowS); } THTensor_(free)(rowR); THTensor_(free)(rowS); } accreal THTensor_(dist)(THTensor *tensor, THTensor *src, real value) { real sum = 0; TH_TENSOR_APPLY2(real, tensor, real, src, sum += pow(fabs(*tensor_data - *src_data), value);) return pow(sum, 1.0/value); } accreal THTensor_(meanall)(THTensor *tensor) { THArgCheck(tensor->nDimension > 0, 1, "empty Tensor"); return THTensor_(sumall)(tensor)/THTensor_(nElement)(tensor); } accreal THTensor_(varall)(THTensor *tensor) { accreal mean = THTensor_(meanall)(tensor); accreal sum = 0; TH_TENSOR_APPLY(real, tensor, sum += (*tensor_data - mean)*(*tensor_data - mean);); sum /= (THTensor_(nElement)(tensor)-1); return sum; } accreal THTensor_(stdall)(THTensor *tensor) { return sqrt(THTensor_(varall)(tensor)); } void THTensor_(linspace)(THTensor *r_, real a, real b, long n) { real i = 0; THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points"); THArgCheck(a <= b, 2, "end range should be greater than start range"); THTensor_(resize1d)(r_, n); if(n == 1) { TH_TENSOR_APPLY(real, r_, *r__data = a; i++; ); } else { TH_TENSOR_APPLY(real, r_, *r__data = a + i*(b-a)/((real)(n-1)); i++; ); } } void THTensor_(logspace)(THTensor *r_, real a, real b, long n) { 
real i = 0; THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points"); THArgCheck(a <= b, 2, "end range should be greater than start range"); THTensor_(resize1d)(r_, n); if(n == 1) { TH_TENSOR_APPLY(real, r_, *r__data = pow(10.0, a); i++; ); } else { TH_TENSOR_APPLY(real, r_, *r__data = pow(10.0, a + i*(b-a)/((real)(n-1))); i++; ); } } void THTensor_(rand)(THTensor *r_, THGenerator *_generator, THLongStorage *size) { THTensor_(resize)(r_, size, NULL); THTensor_(uniform)(r_, _generator, 0, 1); } void THTensor_(randn)(THTensor *r_, THGenerator *_generator, THLongStorage *size) { THTensor_(resize)(r_, size, NULL); THTensor_(normal)(r_, _generator, 0, 1); } void THTensor_(histc)(THTensor *hist, THTensor *tensor, long nbins, real minvalue, real maxvalue) { THTensor *clone; real minval; real maxval; real bins; real *h_data; THTensor_(resize1d)(hist, nbins); THTensor_(zero)(hist); minval = minvalue; maxval = maxvalue; if (minval == maxval) { minval = THTensor_(minall)(tensor); maxval = THTensor_(maxall)(tensor); } if (minval == maxval) { minval = minval - 1; maxval = maxval + 1; } bins = (real)(nbins)-1e-6; clone = THTensor_(newWithSize1d)(THTensor_(nElement)(tensor)); THTensor_(copy)(clone,tensor); THTensor_(add)(clone, clone, -minval); THTensor_(div)(clone, clone, (maxval-minval)); THTensor_(mul)(clone, clone, bins); THTensor_(floor)(clone, clone); THTensor_(add)(clone, clone, 1); h_data = THTensor_(data)(hist); TH_TENSOR_APPLY(real, clone, \ if ((*clone_data <= nbins) && (*clone_data >= 1)) { \ *(h_data + (int)(*clone_data) - 1) += 1; \ }); THTensor_(free)(clone); } #endif /* floating point only part */ #endif
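THTensor_(kthvalue) and THTensor_(median) above copy each slice into a temporary buffer and run quickselect on it, with median taking k = (t_size-1) >> 1. A minimal standalone sketch of the same selection idea on a plain double array follows; it is illustrative only (the names are made up here) and uses a simple Lomuto partition rather than the median-of-three scheme used above.

#include <stdio.h>

static void swap_d(double *a, double *b) { double t = *a; *a = *b; *b = t; }

/* Return the k-th smallest element (0-based) of v[0..n-1]; v is reordered. */
static double quickselect_sketch(double *v, long n, long k)
{
  long lo = 0, hi = n - 1;
  while (lo < hi) {
    double pivot = v[hi];              /* Lomuto partition, pivot = last element */
    long store = lo, i;
    for (i = lo; i < hi; i++)
      if (v[i] < pivot) swap_d(&v[i], &v[store++]);
    swap_d(&v[store], &v[hi]);         /* pivot now sits at its final index */
    if (store == k) return v[k];
    else if (store < k) lo = store + 1;
    else hi = store - 1;
  }
  return v[k];
}

int main(void)
{
  double v[] = { 4.0, 1.0, 7.0, 3.0, 9.0 };
  long n = 5, k = (n - 1) >> 1;        /* same middle index as THTensor_(median) */
  printf("median = %g\n", quickselect_sketch(v, n, k));  /* prints 4 */
  return 0;
}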
GB_unaryop__identity_fp32_uint32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_fp32_uint32 // op(A') function: GB_tran__identity_fp32_uint32 // C type: float // A type: uint32_t // cast: float cij = (float) aij // unaryop: cij = aij #define GB_ATYPE \ uint32_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ float z = (float) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_fp32_uint32 ( float *restrict Cx, const uint32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_fp32_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
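With the GB_GETA/GB_CASTING/GB_OP macros expanded, GB_unop__identity_fp32_uint32 reduces to an elementwise cast loop over anz entries. The following standalone sketch (illustrative only, not the generated code) shows that loop shape; the other generated unary-operator files in this collection differ only in the types involved and in the operator applied after the cast.

#include <stdint.h>

/* Cast each uint32_t entry of Ax to float, in parallel over anz entries. */
void identity_fp32_uint32_sketch(float *Cx, const uint32_t *Ax,
                                 int64_t anz, int nthreads)
{
  #pragma omp parallel for num_threads(nthreads) schedule(static)
  for (int64_t p = 0; p < anz; p++)
  {
    uint32_t aij = Ax[p];        /* GB_GETA: read the input entry      */
    float z = (float) aij;       /* GB_CASTING: typecast to the C type */
    Cx[p] = z;                   /* GB_OP: identity, so just store z   */
  }
}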
Square.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/Square.c"
#else

void THNN_(Square_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output)
{
  THTensor_(resizeAs)(output, input);

  if (input->nDimension == 1 || !THTensor_(isContiguous)(input) || !THTensor_(isContiguous)(output))
  {
    TH_TENSOR_APPLY2(real, output, real, input,
      *output_data = (*input_data) * (*input_data);
    );
  }
  else
  {
    real *output_data = THTensor_(data)(output);
    real *input_data = THTensor_(data)(input);
    long i;
    #pragma omp parallel for private(i)
    for (i = 0; i < THTensor_(nElement)(input); i++)
      output_data[i] = input_data[i]*input_data[i];
  }
}

void THNN_(Square_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput)
{
  THNN_CHECK_SHAPE(input, gradOutput);
  THTensor_(resizeAs)(gradInput, input);

  if (input->nDimension == 1 || !THTensor_(isContiguous)(input) || !THTensor_(isContiguous)(gradOutput) || !THTensor_(isContiguous)(gradInput))
  {
    TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
      *gradInput_data = 2.0 * (*gradOutput_data) * (*input_data);
    );
  }
  else
  {
    real *gradOutput_data = THTensor_(data)(gradOutput);
    real *gradInput_data = THTensor_(data)(gradInput);
    real *input_data = THTensor_(data)(input);
    long i;
    #pragma omp parallel for private(i)
    for (i = 0; i < THTensor_(nElement)(gradInput); i++)
      gradInput_data[i] = 2.0 * gradOutput_data[i] * input_data[i];
  }
}

#endif
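The backward pass above applies d(x^2)/dx = 2x, so gradInput = 2 * gradOutput * input elementwise. A minimal plain-array sketch of both passes, outside the THNN dispatch and TH_TENSOR_APPLY machinery (names here are illustrative):

/* Forward: out[i] = in[i]^2.  Backward: gin[i] = 2 * gout[i] * in[i]. */
void square_forward_sketch(const float *in, float *out, long n)
{
  long i;
  #pragma omp parallel for private(i)
  for (i = 0; i < n; i++)
    out[i] = in[i] * in[i];
}

void square_backward_sketch(const float *in, const float *gout, float *gin, long n)
{
  long i;
  #pragma omp parallel for private(i)
  for (i = 0; i < n; i++)
    gin[i] = 2.0f * gout[i] * in[i];
}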
kiss_fft.c
/* * Copyright (c) 2003-2010, Mark Borgerding. All rights reserved. * This file is part of KISS FFT - https://github.com/mborgerding/kissfft * * SPDX-License-Identifier: BSD-3-Clause * See COPYING file for more information. */ #include "_kiss_fft_guts.h" /* The guts header contains all the multiplication and addition macros that are defined for fixed or floating point complex numbers. It also delares the kf_ internal functions. */ static void kf_bfly2(kiss_fft_cpx* Fout, const size_t fstride, const kiss_fft_cfg st, int m) { kiss_fft_cpx* Fout2; kiss_fft_cpx* tw1 = st->twiddles; kiss_fft_cpx t; Fout2 = Fout + m; do { C_FIXDIV(*Fout, 2); C_FIXDIV(*Fout2, 2); C_MUL(t, *Fout2, *tw1); tw1 += fstride; C_SUB(*Fout2, *Fout, t); C_ADDTO(*Fout, t); ++Fout2; ++Fout; } while (--m); } static void kf_bfly4(kiss_fft_cpx* Fout, const size_t fstride, const kiss_fft_cfg st, const size_t m) { kiss_fft_cpx *tw1, *tw2, *tw3; kiss_fft_cpx scratch[6]; size_t k = m; const size_t m2 = 2 * m; const size_t m3 = 3 * m; tw3 = tw2 = tw1 = st->twiddles; do { C_FIXDIV(*Fout, 4); C_FIXDIV(Fout[m], 4); C_FIXDIV(Fout[m2], 4); C_FIXDIV(Fout[m3], 4); C_MUL(scratch[0], Fout[m], *tw1); C_MUL(scratch[1], Fout[m2], *tw2); C_MUL(scratch[2], Fout[m3], *tw3); C_SUB(scratch[5], *Fout, scratch[1]); C_ADDTO(*Fout, scratch[1]); C_ADD(scratch[3], scratch[0], scratch[2]); C_SUB(scratch[4], scratch[0], scratch[2]); C_SUB(Fout[m2], *Fout, scratch[3]); tw1 += fstride; tw2 += fstride * 2; tw3 += fstride * 3; C_ADDTO(*Fout, scratch[3]); if (st->inverse) { Fout[m].r = scratch[5].r - scratch[4].i; Fout[m].i = scratch[5].i + scratch[4].r; Fout[m3].r = scratch[5].r + scratch[4].i; Fout[m3].i = scratch[5].i - scratch[4].r; } else { Fout[m].r = scratch[5].r + scratch[4].i; Fout[m].i = scratch[5].i - scratch[4].r; Fout[m3].r = scratch[5].r - scratch[4].i; Fout[m3].i = scratch[5].i + scratch[4].r; } ++Fout; } while (--k); } static void kf_bfly3(kiss_fft_cpx* Fout, const size_t fstride, const kiss_fft_cfg st, size_t m) { size_t k = m; const size_t m2 = 2 * m; kiss_fft_cpx *tw1, *tw2; kiss_fft_cpx scratch[5]; kiss_fft_cpx epi3; epi3 = st->twiddles[fstride * m]; tw1 = tw2 = st->twiddles; do { C_FIXDIV(*Fout, 3); C_FIXDIV(Fout[m], 3); C_FIXDIV(Fout[m2], 3); C_MUL(scratch[1], Fout[m], *tw1); C_MUL(scratch[2], Fout[m2], *tw2); C_ADD(scratch[3], scratch[1], scratch[2]); C_SUB(scratch[0], scratch[1], scratch[2]); tw1 += fstride; tw2 += fstride * 2; Fout[m].r = Fout->r - HALF_OF(scratch[3].r); Fout[m].i = Fout->i - HALF_OF(scratch[3].i); C_MULBYSCALAR(scratch[0], epi3.i); C_ADDTO(*Fout, scratch[3]); Fout[m2].r = Fout[m].r + scratch[0].i; Fout[m2].i = Fout[m].i - scratch[0].r; Fout[m].r -= scratch[0].i; Fout[m].i += scratch[0].r; ++Fout; } while (--k); } static void kf_bfly5(kiss_fft_cpx* Fout, const size_t fstride, const kiss_fft_cfg st, int m) { kiss_fft_cpx *Fout0, *Fout1, *Fout2, *Fout3, *Fout4; int u; kiss_fft_cpx scratch[13]; kiss_fft_cpx* twiddles = st->twiddles; kiss_fft_cpx* tw; kiss_fft_cpx ya, yb; ya = twiddles[fstride * m]; yb = twiddles[fstride * 2 * m]; Fout0 = Fout; Fout1 = Fout0 + m; Fout2 = Fout0 + 2 * m; Fout3 = Fout0 + 3 * m; Fout4 = Fout0 + 4 * m; tw = st->twiddles; for (u = 0; u < m; ++u) { C_FIXDIV(*Fout0, 5); C_FIXDIV(*Fout1, 5); C_FIXDIV(*Fout2, 5); C_FIXDIV(*Fout3, 5); C_FIXDIV(*Fout4, 5); scratch[0] = *Fout0; C_MUL(scratch[1], *Fout1, tw[u * fstride]); C_MUL(scratch[2], *Fout2, tw[2 * u * fstride]); C_MUL(scratch[3], *Fout3, tw[3 * u * fstride]); C_MUL(scratch[4], *Fout4, tw[4 * u * fstride]); C_ADD(scratch[7], scratch[1], scratch[4]); 
C_SUB(scratch[10], scratch[1], scratch[4]); C_ADD(scratch[8], scratch[2], scratch[3]); C_SUB(scratch[9], scratch[2], scratch[3]); Fout0->r += scratch[7].r + scratch[8].r; Fout0->i += scratch[7].i + scratch[8].i; scratch[5].r = scratch[0].r + S_MUL(scratch[7].r, ya.r) + S_MUL(scratch[8].r, yb.r); scratch[5].i = scratch[0].i + S_MUL(scratch[7].i, ya.r) + S_MUL(scratch[8].i, yb.r); scratch[6].r = S_MUL(scratch[10].i, ya.i) + S_MUL(scratch[9].i, yb.i); scratch[6].i = -S_MUL(scratch[10].r, ya.i) - S_MUL(scratch[9].r, yb.i); C_SUB(*Fout1, scratch[5], scratch[6]); C_ADD(*Fout4, scratch[5], scratch[6]); scratch[11].r = scratch[0].r + S_MUL(scratch[7].r, yb.r) + S_MUL(scratch[8].r, ya.r); scratch[11].i = scratch[0].i + S_MUL(scratch[7].i, yb.r) + S_MUL(scratch[8].i, ya.r); scratch[12].r = -S_MUL(scratch[10].i, yb.i) + S_MUL(scratch[9].i, ya.i); scratch[12].i = S_MUL(scratch[10].r, yb.i) - S_MUL(scratch[9].r, ya.i); C_ADD(*Fout2, scratch[11], scratch[12]); C_SUB(*Fout3, scratch[11], scratch[12]); ++Fout0; ++Fout1; ++Fout2; ++Fout3; ++Fout4; } } /* perform the butterfly for one stage of a mixed radix FFT */ static void kf_bfly_generic(kiss_fft_cpx* Fout, const size_t fstride, const kiss_fft_cfg st, int m, int p) { int u, k, q1, q; kiss_fft_cpx* twiddles = st->twiddles; kiss_fft_cpx t; int Norig = st->nfft; kiss_fft_cpx* scratch = (kiss_fft_cpx*)KISS_FFT_TMP_ALLOC(sizeof(kiss_fft_cpx) * p); for (u = 0; u < m; ++u) { k = u; for (q1 = 0; q1 < p; ++q1) { scratch[q1] = Fout[k]; C_FIXDIV(scratch[q1], p); k += m; } k = u; for (q1 = 0; q1 < p; ++q1) { int twidx = 0; Fout[k] = scratch[0]; for (q = 1; q < p; ++q) { twidx += fstride * k; if (twidx >= Norig) twidx -= Norig; C_MUL(t, scratch[q], twiddles[twidx]); C_ADDTO(Fout[k], t); } k += m; } } KISS_FFT_TMP_FREE(scratch); } static void kf_work(kiss_fft_cpx* Fout, const kiss_fft_cpx* f, const size_t fstride, int in_stride, int* factors, const kiss_fft_cfg st) { kiss_fft_cpx* Fout_beg = Fout; const int p = *factors++; /* the radix */ const int m = *factors++; /* stage's fft length/p */ const kiss_fft_cpx* Fout_end = Fout + p * m; #ifdef _OPENMP // use openmp extensions at the // top-level (not recursive) if (fstride == 1 && p <= 5) { int k; // execute the p different work units in different threads #pragma omp parallel for for (k = 0; k < p; ++k) kf_work(Fout + k * m, f + fstride * in_stride * k, fstride * p, in_stride, factors, st); // all threads have joined by this point switch (p) { case 2: kf_bfly2(Fout, fstride, st, m); break; case 3: kf_bfly3(Fout, fstride, st, m); break; case 4: kf_bfly4(Fout, fstride, st, m); break; case 5: kf_bfly5(Fout, fstride, st, m); break; default: kf_bfly_generic(Fout, fstride, st, m, p); break; } return; } #endif if (m == 1) { do { *Fout = *f; f += fstride * in_stride; } while (++Fout != Fout_end); } else { do { // recursive call: // DFT of size m*p performed by doing // p instances of smaller DFTs of size m, // each one takes a decimated version of the input kf_work(Fout, f, fstride * p, in_stride, factors, st); f += fstride * in_stride; } while ((Fout += m) != Fout_end); } Fout = Fout_beg; // recombine the p smaller DFTs switch (p) { case 2: kf_bfly2(Fout, fstride, st, m); break; case 3: kf_bfly3(Fout, fstride, st, m); break; case 4: kf_bfly4(Fout, fstride, st, m); break; case 5: kf_bfly5(Fout, fstride, st, m); break; default: kf_bfly_generic(Fout, fstride, st, m, p); break; } } /* facbuf is populated by p1,m1,p2,m2, ... 
where p[i] * m[i] = m[i-1] m0 = n */ static void kf_factor(int n, int* facbuf) { int p = 4; double floor_sqrt; floor_sqrt = floor(sqrt((double)n)); /*factor out powers of 4, powers of 2, then any remaining primes */ do { while (n % p) { switch (p) { case 4: p = 2; break; case 2: p = 3; break; default: p += 2; break; } if (p > floor_sqrt) p = n; /* no more factors, skip to end */ } n /= p; *facbuf++ = p; *facbuf++ = n; } while (n > 1); } /* * * User-callable function to allocate all necessary storage space for the fft. * * The return value is a contiguous block of memory, allocated with malloc. As such, * It can be freed with free(), rather than a kiss_fft-specific function. * */ kiss_fft_cfg kiss_fft_alloc(int nfft, int inverse_fft, void* mem, size_t* lenmem) { kiss_fft_cfg st = NULL; size_t memneeded = sizeof(struct kiss_fft_state) + sizeof(kiss_fft_cpx) * (nfft - 1); /* twiddle factors*/ if (lenmem == NULL) { st = (kiss_fft_cfg)KISS_FFT_MALLOC(memneeded); } else { if (mem != NULL && *lenmem >= memneeded) st = (kiss_fft_cfg)mem; *lenmem = memneeded; } if (st) { int i; st->nfft = nfft; st->inverse = inverse_fft; for (i = 0; i < nfft; ++i) { const double pi = 3.141592653589793238462643383279502884197169399375105820974944; double phase = -2 * pi * i / nfft; if (st->inverse) phase *= -1; kf_cexp(st->twiddles + i, phase); } kf_factor(nfft, st->factors); } return st; } void kiss_fft_stride(kiss_fft_cfg st, const kiss_fft_cpx* fin, kiss_fft_cpx* fout, int in_stride) { if (fin == fout) { // NOTE: this is not really an in-place FFT algorithm. // It just performs an out-of-place FFT into a temp buffer kiss_fft_cpx* tmpbuf = (kiss_fft_cpx*)KISS_FFT_TMP_ALLOC(sizeof(kiss_fft_cpx) * st->nfft); kf_work(tmpbuf, fin, 1, in_stride, st->factors, st); memcpy(fout, tmpbuf, sizeof(kiss_fft_cpx) * st->nfft); KISS_FFT_TMP_FREE(tmpbuf); } else { kf_work(fout, fin, 1, in_stride, st->factors, st); } } void kiss_fft(kiss_fft_cfg cfg, const kiss_fft_cpx* fin, kiss_fft_cpx* fout) { kiss_fft_stride(cfg, fin, fout, 1); } void kiss_fft_cleanup(void) { // nothing needed any more } int kiss_fft_next_fast_size(int n) { while (1) { int m = n; while ((m % 2) == 0) m /= 2; while ((m % 3) == 0) m /= 3; while ((m % 5) == 0) m /= 5; if (m <= 1) break; /* n is completely factorable by twos, threes, and fives */ n++; } return n; }
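A minimal usage sketch for the API above, assuming the matching kiss_fft.h header from the same distribution: allocate a config with kiss_fft_alloc, run the forward transform, and release the config, which per the comment above is a single contiguous malloc'd block and can be freed with free().

#include <stdio.h>
#include <stdlib.h>
#include "kiss_fft.h"

int main(void)
{
  const int nfft = 8;
  kiss_fft_cpx in[8], out[8];
  int i;
  kiss_fft_cfg cfg = kiss_fft_alloc(nfft, 0 /* forward */, NULL, NULL);
  if (cfg == NULL) return 1;

  for (i = 0; i < nfft; i++) {     /* alternating (Nyquist-rate) test signal */
    in[i].r = (i % 2 ? -1.0f : 1.0f);
    in[i].i = 0;
  }
  kiss_fft(cfg, in, out);
  for (i = 0; i < nfft; i++)
    printf("bin %d: %g %+gi\n", i, (double) out[i].r, (double) out[i].i);

  free(cfg);                       /* one contiguous block, plain free() is fine */
  return 0;
}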
GB_unaryop__abs_fp64_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_fp64_int8 // op(A') function: GB_tran__abs_fp64_int8 // C type: double // A type: int8_t // cast: double cij = (double) aij // unaryop: cij = fabs (aij) #define GB_ATYPE \ int8_t #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = fabs (x) ; // casting #define GB_CASTING(z, x) \ double z = (double) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_fp64_int8 ( double *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_fp64_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
Parallelizer.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_PARALLELIZER_H #define EIGEN_PARALLELIZER_H #if EIGEN_HAS_CXX11_ATOMIC #include <atomic> #endif namespace Eigen { namespace internal { /** \internal */ inline void manage_multi_threading(Action action, int* v) { static int m_maxThreads = -1; EIGEN_UNUSED_VARIABLE(m_maxThreads); if(action==SetAction) { eigen_internal_assert(v!=0); m_maxThreads = *v; } else if(action==GetAction) { eigen_internal_assert(v!=0); #ifdef EIGEN_HAS_OPENMP if(m_maxThreads>0) *v = m_maxThreads; else *v = omp_get_max_threads(); #else *v = 1; #endif } else { eigen_internal_assert(false); } } } /** Must be call first when calling Eigen from multiple threads */ inline void initParallel() { int nbt; internal::manage_multi_threading(GetAction, &nbt); std::ptrdiff_t l1, l2, l3; internal::manage_caching_sizes(GetAction, &l1, &l2, &l3); } /** \returns the max number of threads reserved for Eigen * \sa setNbThreads */ inline int nbThreads() { int ret; internal::manage_multi_threading(GetAction, &ret); return ret; } /** Sets the max number of threads reserved for Eigen * \sa nbThreads */ inline void setNbThreads(int v) { internal::manage_multi_threading(SetAction, &v); } namespace internal { template<typename Index> struct GemmParallelInfo { GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {} // volatile is not enough on all architectures (see bug 1572) // to guarantee that when thread A says to thread B that it is // done with packing a block, then all writes have been really // carried out... C++11 memory model+atomic guarantees this. #if EIGEN_HAS_CXX11_ATOMIC std::atomic<Index> sync; std::atomic<int> users; #else Index volatile sync; int volatile users; #endif Index lhs_start; Index lhs_length; }; template<bool Condition, typename Functor, typename Index> void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose) { // TODO when EIGEN_USE_BLAS is defined, // we should still enable OMP for other scalar types // Without C++11, we have to disable GEMM's parallelization on // non x86 architectures because there volatile is not enough for our purpose. // See bug 1572. #if (! defined(EIGEN_HAS_OPENMP)) || defined(EIGEN_USE_BLAS) || ((!EIGEN_HAS_CXX11_ATOMIC) && !(EIGEN_ARCH_i386_OR_x86_64)) // FIXME the transpose variable is only needed to properly split // the matrix product when multithreading is enabled. This is a temporary // fix to support row-major destination matrices. This whole // parallelizer mechanism has to be redesigned anyway. EIGEN_UNUSED_VARIABLE(depth); EIGEN_UNUSED_VARIABLE(transpose); func(0,rows, 0,cols); #else // Dynamically check whether we should enable or disable OpenMP. // The conditions are: // - the max number of threads we can create is greater than 1 // - we are not already in a parallel code // - the sizes are large enough // compute the maximal number of threads from the size of the product: // This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once. Index size = transpose ? 
rows : cols; Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr); // compute the maximal number of threads from the total amount of work: double work = static_cast<double>(rows) * static_cast<double>(cols) * static_cast<double>(depth); double kMinTaskSize = 50000; // FIXME improve this heuristic. pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, static_cast<Index>( work / kMinTaskSize ) )); // compute the number of threads we are going to use Index threads = std::min<Index>(nbThreads(), pb_max_threads); // if multi-threading is explicitly disabled, not useful, or if we already are in a parallel session, // then abort multi-threading // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp? if((!Condition) || (threads==1) || (omp_get_num_threads()>1)) return func(0,rows, 0,cols); Eigen::initParallel(); func.initParallelSession(threads); if(transpose) std::swap(rows,cols); ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0); #pragma omp parallel num_threads(threads) { Index i = omp_get_thread_num(); // Note that the actual number of threads might be lower than the number of request ones. Index actual_threads = omp_get_num_threads(); Index blockCols = (cols / actual_threads) & ~Index(0x3); Index blockRows = (rows / actual_threads); blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr; Index r0 = i*blockRows; Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows; Index c0 = i*blockCols; Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols; info[i].lhs_start = r0; info[i].lhs_length = actualBlockRows; if(transpose) func(c0, actualBlockCols, 0, rows, info); else func(0, rows, c0, actualBlockCols, info); } #endif } } // end namespace internal } // end namespace Eigen #endif // EIGEN_PARALLELIZER_H
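The per-thread block arithmetic inside parallelize_gemm can be read in isolation: thread i gets a column block rounded down to a multiple of 4 and a row block rounded down to a multiple of the kernel's mr, with the last thread absorbing the remainder. A small standalone sketch of that split (illustrative names, mr passed in explicitly):

#include <stdio.h>

/* Compute thread i's [r0, r0+rows_i) x [c0, c0+cols_i) block of a rows x cols product. */
static void gemm_block_sketch(long rows, long cols, int threads, int i, int mr,
                              long *r0, long *rows_i, long *c0, long *cols_i)
{
  long blockCols = (cols / threads) & ~0x3L;       /* multiple of 4 columns */
  long blockRows = ((rows / threads) / mr) * mr;   /* multiple of mr rows   */
  *r0 = i * blockRows;
  *rows_i = (i + 1 == threads) ? rows - *r0 : blockRows;
  *c0 = i * blockCols;
  *cols_i = (i + 1 == threads) ? cols - *c0 : blockCols;
}

int main(void)
{
  long r0, nr, c0, nc;
  int i;
  for (i = 0; i < 3; i++) {
    gemm_block_sketch(100, 35, 3, i, 4, &r0, &nr, &c0, &nc);
    printf("thread %d: rows [%ld,%ld) cols [%ld,%ld)\n", i, r0, r0 + nr, c0, c0 + nc);
  }
  return 0;
}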
linked_omp3_tasks.c
#include <omp.h> #include <stdio.h> #include <stdlib.h> #ifndef N #define N 5 #endif #ifndef FS #define FS 38 #endif struct node { int data; int fibdata; struct node* next; }; struct node* init_list(struct node* p); void processwork(struct node* p); int fib(int n); int fib(int n) { int x, y; if (n < 2) { return (n); } else { x = fib(n - 1); y = fib(n - 2); return (x + y); } } void processwork(struct node* p) { int n, temp; n = p->data; temp = fib(n); p->fibdata = temp; } struct node* init_list(struct node* p) { int i; struct node* head = NULL; struct node* temp = NULL; head = (struct node*)malloc(sizeof(struct node)); p = head; p->data = FS; p->fibdata = 0; for (i = 0; i < N; i++) { temp = (struct node*)malloc(sizeof(struct node)); p->next = temp; p = temp; p->data = FS + i + 1; p->fibdata = i + 1; } p->next = NULL; return head; } int main() { double start, end; struct node* p = NULL; struct node* temp = NULL; struct node* head = NULL; printf("Process linked list\n"); printf(" Each linked list node will be processed by function 'processwork()'\n"); printf(" Each ll node will compute %d fibonacci numbers beginning with %d\n", N, FS); p = init_list(p); head = p; start = omp_get_wtime(); #pragma omp parallel { #pragma omp master printf("Threads: %d\n", omp_get_num_threads()); #pragma omp single { p = head; while (p) { #pragma omp task firstprivate(p) // first private is required { processwork(p); } p = p->next; } } } end = omp_get_wtime(); p = head; while (p != NULL) { printf("%d : %d\n", p->data, p->fibdata); temp = p->next; free(p); p = temp; } free(p); printf("Compute Time: %f seconds\n", end - start); return 0; }
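A distilled sketch of the task pattern used in main() above: one thread inside the single construct walks the list and spawns tasks, and firstprivate(p) snapshots the pointer for each task so the producer can keep advancing it (names below are illustrative):

#include <stdio.h>

struct item { int value; struct item *next; };

void visit(struct item *p) { printf("%d\n", p->value); }

void walk(struct item *head)
{
  #pragma omp parallel
  {
    #pragma omp single
    {
      for (struct item *p = head; p != NULL; p = p->next) {
        /* firstprivate(p) captures the current node for this task */
        #pragma omp task firstprivate(p)
        visit(p);
      }
    }
  }
}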
GB_unaryop__identity_uint64_uint64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_uint64_uint64 // op(A') function: GB_tran__identity_uint64_uint64 // C type: uint64_t // A type: uint64_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint64_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ uint64_t z = (uint64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_uint64_uint64 ( uint64_t *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_uint64_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
matmul.c
//------------------------------------------------------------------------------------------------------------------------------ // Samuel Williams // SWWilliams@lbl.gov // Lawrence Berkeley National Lab //------------------------------------------------------------------------------------------------------------------------------ void matmul(level_type * level, double *C, int * id_A, int * id_B, int rows, int cols, int A_equals_B_transpose){ // *id_A = m vector_id's (conceptually pointers to the rows of a m x level->num_my_boxes*volume matrix) // *id_B = n vector_id's (conceptually pointers to the columns of a level->num_my_boxes*volume matrix x n) // *C is a mxn matrix where C[rows][cols] = dot(id_A[rows],id_B[cols]) // FIX, id_A and id_B are likely the same and thus C[][] will be symmetric (modulo missing row?) // if(A_equals_B_transpose && (cols>=rows)) then use id_B and only run for nn>=mm // common case for s-step Krylov methods // C_is_symmetric && cols< rows (use id_A) int mm,nn; uint64_t _timeStart = CycleTime(); // FIX... rather than performing an all_reduce on the essentially symmetric [G,g], do the all_reduce on the upper triangle and then duplicate (saves BW) // #pragma omp parallel for schedule(static,1) collapse(2) hclib::finish([] { hclib::loop_domain_2d loop(rows, cols); hclib::forasync(&loop, [] (int mm, int nn) { if(nn>=mm){ // upper triangular int box; double a_dot_b_level = 0.0; for(box=0;box<level->num_my_boxes;box++){ int i,j,k; const int jStride = level->my_boxes[box].jStride; const int kStride = level->my_boxes[box].kStride; const int ghosts = level->my_boxes[box].ghosts; const int dim = level->my_boxes[box].dim; double * __restrict__ grid_a = level->my_boxes[box].vectors[id_A[mm]] + ghosts*(1+jStride+kStride); // i.e. [0] = first non ghost zone point double * __restrict__ grid_b = level->my_boxes[box].vectors[id_B[nn]] + ghosts*(1+jStride+kStride); double a_dot_b_box = 0.0; for(k=0;k<dim;k++){ for(j=0;j<dim;j++){ for(i=0;i<dim;i++){ int ijk = i + j*jStride + k*kStride; a_dot_b_box += grid_a[ijk]*grid_b[ijk]; }}} a_dot_b_level+=a_dot_b_box; } C[mm*cols + nn] = a_dot_b_level; // C[mm][nn] if((mm<cols)&&(nn<rows)){C[nn*cols + mm] = a_dot_b_level;}// C[nn][mm] } }); }); level->cycles.blas3 += (uint64_t)(CycleTime()-_timeStart); #ifdef USE_MPI double *send_buffer = (double*)malloc(rows*cols*sizeof(double)); for(mm=0;mm<rows;mm++){ for(nn=0;nn<cols;nn++){ send_buffer[mm*cols + nn] = C[mm*cols + nn]; }} uint64_t _timeStartAllReduce = CycleTime(); hclib::MPI_Allreduce(send_buffer,C,rows*cols,MPI_DOUBLE,MPI_SUM,level->MPI_COMM_ALLREDUCE); uint64_t _timeEndAllReduce = CycleTime(); level->cycles.collectives += (uint64_t)(_timeEndAllReduce-_timeStartAllReduce); free(send_buffer); #endif }
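matmul() above computes C[mm][nn] = dot(id_A[mm], id_B[nn]) over the level's boxes, evaluating only the upper triangle (nn >= mm) and mirroring the result into C[nn][mm] when both indices fit. A plain-array OpenMP sketch of that triangular fill, without the box/ghost-zone indexing or the HClib forasync wrapper (names here are illustrative):

/* C is rows x cols (row-major); A[mm] and B[nn] are vectors of length len. */
void dot_matrix_sketch(double *C, double *const *A, double *const *B,
                       int rows, int cols, long len)
{
  #pragma omp parallel for schedule(static,1) collapse(2)
  for (int mm = 0; mm < rows; mm++) {
    for (int nn = 0; nn < cols; nn++) {
      if (nn >= mm) {                        /* upper triangle only */
        double dot = 0.0;
        for (long i = 0; i < len; i++)
          dot += A[mm][i] * B[nn][i];
        C[mm*cols + nn] = dot;               /* C[mm][nn] */
        if (mm < cols && nn < rows)
          C[nn*cols + mm] = dot;             /* mirror into C[nn][mm] */
      }
    }
  }
}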
symv_x_dia_u_hi.c
#include "alphasparse/kernel.h" #include "alphasparse/opt.h" #include "alphasparse/util.h" #include <string.h> #ifdef _OPENMP #include <omp.h> #endif static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA* A, const ALPHA_Number* x, const ALPHA_Number beta, ALPHA_Number* y) { const ALPHA_INT m = A->rows; const ALPHA_INT n = A->cols; if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE; const ALPHA_INT thread_num = alpha_get_thread_num(); ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num); for(int i = 0; i < thread_num; ++i) { tmp[i] = malloc(sizeof(ALPHA_Number) * m); memset(tmp[i], 0, sizeof(ALPHA_Number) * m); } const ALPHA_INT diags = A->ndiag; #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (ALPHA_INT i = 0; i < diags; ++i) { const ALPHA_INT threadId = alpha_get_thread_id(); const ALPHA_INT dis = A->distance[i]; if(dis > 0) { const ALPHA_INT row_start = 0; const ALPHA_INT col_start = dis; const ALPHA_INT nnz = m - dis; const ALPHA_INT start = i * A->lval; for(ALPHA_INT j = 0; j < nnz; ++j) { ALPHA_Number v; alpha_mul(v, alpha, A->values[start + j]); alpha_madde(tmp[threadId][row_start + j], v, x[col_start + j]); alpha_madde(tmp[threadId][col_start + j], v, x[row_start + j]); } } } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for(ALPHA_INT i = 0; i < m; ++i) { alpha_mul(y[i], beta, y[i]); alpha_madde(y[i], alpha, x[i]); for(ALPHA_INT j = 0; j < thread_num; ++j) { alpha_add(y[i], y[i], tmp[j][i]); } } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (ALPHA_INT i = 0; i < thread_num; ++i) { alpha_free(tmp[i]); } alpha_free(tmp); return ALPHA_SPARSE_STATUS_SUCCESS; } alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA* A, const ALPHA_Number* x, const ALPHA_Number beta, ALPHA_Number* y) { return ONAME_omp(alpha, A, x, beta, y); }
hermm_c_coo_u_hi_row.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy) { ALPHA_INT m = mat->rows; ALPHA_INT n = columns; ALPHA_INT num_threads = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT r = 0; r < m; ++r) for (ALPHA_INT c = 0; c < n; c++) { ALPHA_Number tmp1, tmp2; alpha_mul(tmp1, y[index2(r, c, ldy)], beta); alpha_mul(tmp2, x[index2(r, c, ldx)], alpha); alpha_add(y[index2(r, c, ldy)], tmp1, tmp2); } num_threads = ((columns/512*24 + 24) < num_threads) ? (columns/512*24 + 24) : num_threads; #ifdef _OPENMP #pragma omp parallel num_threads(num_threads) #endif { ALPHA_INT tid = alpha_get_thread_id(); ALPHA_INT bcl = cross_block_low(tid, num_threads, columns); ALPHA_INT bch = cross_block_high(tid, num_threads, columns); ALPHA_INT bcs = cross_block_size(tid, num_threads, columns); for (ALPHA_INT ai = 0; ai < mat->nnz; ai++) { ALPHA_INT ac = mat->col_indx[ai]; ALPHA_INT r = mat->row_indx[ai]; if (ac > r) { ALPHA_Number val; alpha_mul(val, alpha, mat->values[ai]); ALPHA_Number val_c; alpha_conj(val_c,mat->values[ai]); alpha_mul(val_c, alpha, val_c); for (ALPHA_INT c = bcl; c < bch; ++c) alpha_madde(y[index2(r, c, ldy)], val, x[index2(ac, c, ldx)]); for (ALPHA_INT c = bcl; c < bch; ++c) alpha_madde(y[index2(ac, c, ldy)], val_c, x[index2(r, c, ldx)]); } } } return ALPHA_SPARSE_STATUS_SUCCESS; }
kmeans.mp.c
#include "../include/util.h" /* Kmeans data. */ float mindistance; /* Minimum distance. */ int npoints; /* Number of points. */ vector_t *data; /* Data being clustered. */ int ncentroids; /* Number of clusters. */ int *map; /* Map of clusters. */ vector_t *centroids; /* Data centroids. */ vector_t *tmp; /* Temporary centroids. */ int *dirty; /* Dirty centroid? */ int *too_far; /* Are there any points too far? */ int *has_changed; /* has any centroid change? */ /* * Populates clusters. */ static void populate(void) { int tid; /* Thread ID. */ int i, j; /* Loop indexes. */ float tmp; /* Auxiliary variable. */ float distance; /* Distance. */ memset(too_far, 0, nthreads*sizeof(int)); /* Iterate over data points. */ #pragma omp parallel private(i, j, tmp, distance, tid) default(shared) { tid = omp_get_thread_num(); #pragma omp for for (i = 0; i < npoints; i++) { distance = vector_distance(centroids[map[i]], data[i]); /* Look for closest cluster. */ for (j = 0; j < ncentroids; j++) { /* Point is in this cluster. */ if (j == map[i]) continue; tmp = vector_distance(centroids[j], data[i]); /* Found. */ if (tmp < distance) { map[i] = j; distance = tmp; #pragma omp critical dirty[j] = 1; } } /* Cluster is too far away. */ if (distance > mindistance) too_far[tid] = 1; } } } /* * Computes cluster's centroids. */ static void compute_centroids(void) { int tid; /* Thread ID. */ int i, j; /* Loop indexes. */ int population; /* Cluster population. */ memset(has_changed, 0, nthreads*sizeof(int)); /* Compute means. */ #pragma omp parallel private(i, j, population, tid) default(shared) { tid = omp_get_thread_num(); #pragma omp for for (i = 0; i < ncentroids; i++) { /* Cluster did not change. */ if (!dirty[i]) continue; /* Initialize temporary vector.*/ vector_assign(tmp[tid], centroids[i]); vector_clear(centroids[i]); /* Compute cluster's mean. */ population = 0; for (j = 0; j < npoints; j++) { /* Not a member of this cluster. */ if (map[j] != i) continue; vector_add(centroids[i], data[j]); population++; } if (population > 1) vector_mult(centroids[i], 1.0/population); has_changed[tid] = 1; } } memset(dirty, 0, ncentroids*sizeof(int)); } /* * Clusters data. */ int *kmeans(vector_t *_data, int _npoints, int _ncentroids, float _mindistance) { int i, j; /* Loop index. */ int again; /* Loop again? */ /* Setup parameters. */ data = _data; npoints = _npoints; ncentroids = _ncentroids; mindistance = _mindistance; /* Create auxiliary structures. */ map = scalloc(npoints, sizeof(int)); too_far = smalloc(nthreads*sizeof(int)); has_changed = smalloc(nthreads*sizeof(int)); dirty = smalloc(ncentroids*sizeof(int)); centroids = smalloc(ncentroids*sizeof(vector_t)); for (i = 0; i < ncentroids; i++) centroids[i] = vector_create(vector_size(data[0])); tmp = smalloc(nthreads*sizeof(vector_t)); for (i = 0; i < nthreads; i++) tmp[i] = vector_create(vector_size(data[0])); /* Initialize mapping. */ for (i = 0; i < npoints; i++) map[i] = -1; /* Initialize centroids. */ for (i = 0; i < ncentroids; i++) { dirty[i] = 1; j = randnum()%npoints; vector_assign(centroids[i], data[j]); map[j] = i; } /* Map unmapped data points. */ for (i = 0; i < npoints; i++) { if (map[i] < 0) map[i] = randnum()%ncentroids; } /* Cluster data. */ do { populate(); compute_centroids(); /* Check if we need to loop. */ for (i = 0; i < nthreads; i++) { /* We will need another iteration. */ if (too_far[i] && has_changed[i]) break; } again = (i < nthreads) ? 1 : 0; } while (again); /* House keeping. 
*/ for (i = 0; i < ncentroids; i++) vector_destroy(centroids[i]); free(centroids); for (i = 0; i < nthreads; i++) vector_destroy(tmp[i]); free(tmp); free(too_far); free(has_changed); free(dirty); return (map); }
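populate() above reassigns each point to its nearest centroid, marks that centroid dirty, and flags the thread if the best distance still exceeds mindistance. A minimal 1-D sketch of that assignment step, using plain scalars instead of vector_t and an OR-reduction instead of the per-thread too_far array (illustrative names):

#include <math.h>

/* Assign each 1-D point to its nearest centroid; returns 1 if any point is
   still farther than mindistance from its centroid (another pass is needed). */
int assign_points_sketch(const double *points, int npoints,
                         const double *centroids, int ncentroids,
                         int *map, int *dirty, double mindistance)
{
  int too_far = 0;
  #pragma omp parallel for reduction(|:too_far)
  for (int i = 0; i < npoints; i++) {
    double best = fabs(points[i] - centroids[map[i]]);
    for (int j = 0; j < ncentroids; j++) {
      if (j == map[i]) continue;
      double d = fabs(points[i] - centroids[j]);
      if (d < best) {
        map[i] = j;
        best = d;
        #pragma omp atomic write
        dirty[j] = 1;                /* benign flag write, made atomic here */
      }
    }
    if (best > mindistance) too_far = 1;
  }
  return too_far;
}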
acado_integrator.c
/* * This file was auto-generated using the ACADO Toolkit. * * While ACADO Toolkit is free software released under the terms of * the GNU Lesser General Public License (LGPL), the generated code * as such remains the property of the user who used ACADO Toolkit * to generate this code. In particular, user dependent data of the code * do not inherit the GNU LGPL license. On the other hand, parts of the * generated code that are a direct copy of source code from the * ACADO Toolkit or the software tools it is based on, remain, as derived * work, automatically covered by the LGPL license. * * ACADO Toolkit is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * */ #include "acado_common.h" real_t rk_dim8_swap; /** Column vector of size: 8 */ real_t rk_dim8_bPerm[ 8 ]; /** Column vector of size: 17 */ real_t auxVar[ 17 ]; real_t rk_ttt; /** Row vector of size: 6 */ real_t rk_xxx[ 6 ]; /** Matrix of size: 4 x 2 (row major format) */ real_t rk_kkk[ 8 ]; /** Matrix of size: 8 x 8 (row major format) */ real_t rk_A[ 64 ]; /** Column vector of size: 8 */ real_t rk_b[ 8 ]; /** Row vector of size: 8 */ int rk_dim8_perm[ 8 ]; /** Column vector of size: 4 */ real_t rk_rhsTemp[ 4 ]; /** Matrix of size: 2 x 24 (row major format) */ real_t rk_diffsTemp2[ 48 ]; /** Matrix of size: 4 x 2 (row major format) */ real_t rk_diffK[ 8 ]; /** Matrix of size: 4 x 6 (row major format) */ real_t rk_diffsPrev2[ 24 ]; /** Matrix of size: 4 x 6 (row major format) */ real_t rk_diffsNew2[ 24 ]; #pragma omp threadprivate( auxVar, rk_ttt, rk_xxx, rk_kkk, rk_diffK, rk_rhsTemp, rk_dim8_perm, rk_A, rk_b, rk_diffsPrev2, rk_diffsNew2, rk_diffsTemp2, rk_dim8_swap, rk_dim8_bPerm ) void acado_rhs(const real_t* in, real_t* out) { const real_t* xd = in; const real_t* u = in + 4; /* Vector of auxiliary variables; number of elements: 5. */ real_t* a = auxVar; /* Compute intermediate quantities: */ a[0] = (tan(u[1])); a[1] = (atan(((real_t)(5.0000000000000000e-01)*a[0]))); a[2] = (cos((xd[3]+a[1]))); a[3] = (sin((xd[3]+a[1]))); a[4] = (sin(a[1])); /* Compute outputs: */ out[0] = (xd[2]*a[2]); out[1] = (xd[2]*a[3]); out[2] = u[0]; out[3] = ((xd[2]/(real_t)(6.7000000000000004e-01))*a[4]); } void acado_diffs(const real_t* in, real_t* out) { const real_t* xd = in; const real_t* u = in + 4; /* Vector of auxiliary variables; number of elements: 17. 
*/ real_t* a = auxVar; /* Compute intermediate quantities: */ a[0] = (tan(u[1])); a[1] = (atan(((real_t)(5.0000000000000000e-01)*a[0]))); a[2] = (cos((xd[3]+a[1]))); a[3] = ((real_t)(-1.0000000000000000e+00)*(sin((xd[3]+a[1])))); a[4] = ((real_t)(1.0000000000000000e+00)/(pow((cos(u[1])),2))); a[5] = ((real_t)(1.0000000000000000e+00)/((real_t)(1.0000000000000000e+00)+(pow(((real_t)(5.0000000000000000e-01)*a[0]),2)))); a[6] = (((real_t)(5.0000000000000000e-01)*a[4])*a[5]); a[7] = (a[6]*a[3]); a[8] = (sin((xd[3]+a[1]))); a[9] = (cos((xd[3]+a[1]))); a[10] = (((real_t)(5.0000000000000000e-01)*a[4])*a[5]); a[11] = (a[10]*a[9]); a[12] = ((real_t)(1.0000000000000000e+00)/(real_t)(6.7000000000000004e-01)); a[13] = (sin(a[1])); a[14] = (((real_t)(5.0000000000000000e-01)*a[4])*a[5]); a[15] = (cos(a[1])); a[16] = (a[14]*a[15]); /* Compute outputs: */ out[0] = (real_t)(0.0000000000000000e+00); out[1] = (real_t)(0.0000000000000000e+00); out[2] = a[2]; out[3] = (xd[2]*a[3]); out[4] = (real_t)(0.0000000000000000e+00); out[5] = (xd[2]*a[7]); out[6] = (real_t)(0.0000000000000000e+00); out[7] = (real_t)(0.0000000000000000e+00); out[8] = a[8]; out[9] = (xd[2]*a[9]); out[10] = (real_t)(0.0000000000000000e+00); out[11] = (xd[2]*a[11]); out[12] = (real_t)(0.0000000000000000e+00); out[13] = (real_t)(0.0000000000000000e+00); out[14] = (real_t)(0.0000000000000000e+00); out[15] = (real_t)(0.0000000000000000e+00); out[16] = (real_t)(1.0000000000000000e+00); out[17] = (real_t)(0.0000000000000000e+00); out[18] = (real_t)(0.0000000000000000e+00); out[19] = (real_t)(0.0000000000000000e+00); out[20] = (a[12]*a[13]); out[21] = (real_t)(0.0000000000000000e+00); out[22] = (real_t)(0.0000000000000000e+00); out[23] = ((xd[2]/(real_t)(6.7000000000000004e-01))*a[16]); } void acado_solve_dim8_triangular( real_t* const A, real_t* const b ) { b[7] = b[7]/A[63]; b[6] -= + A[55]*b[7]; b[6] = b[6]/A[54]; b[5] -= + A[47]*b[7]; b[5] -= + A[46]*b[6]; b[5] = b[5]/A[45]; b[4] -= + A[39]*b[7]; b[4] -= + A[38]*b[6]; b[4] -= + A[37]*b[5]; b[4] = b[4]/A[36]; b[3] -= + A[31]*b[7]; b[3] -= + A[30]*b[6]; b[3] -= + A[29]*b[5]; b[3] -= + A[28]*b[4]; b[3] = b[3]/A[27]; b[2] -= + A[23]*b[7]; b[2] -= + A[22]*b[6]; b[2] -= + A[21]*b[5]; b[2] -= + A[20]*b[4]; b[2] -= + A[19]*b[3]; b[2] = b[2]/A[18]; b[1] -= + A[15]*b[7]; b[1] -= + A[14]*b[6]; b[1] -= + A[13]*b[5]; b[1] -= + A[12]*b[4]; b[1] -= + A[11]*b[3]; b[1] -= + A[10]*b[2]; b[1] = b[1]/A[9]; b[0] -= + A[7]*b[7]; b[0] -= + A[6]*b[6]; b[0] -= + A[5]*b[5]; b[0] -= + A[4]*b[4]; b[0] -= + A[3]*b[3]; b[0] -= + A[2]*b[2]; b[0] -= + A[1]*b[1]; b[0] = b[0]/A[0]; } real_t acado_solve_dim8_system( real_t* const A, real_t* const b, int* const rk_perm ) { real_t det; int i; int j; int k; int indexMax; int intSwap; real_t valueMax; real_t temp; for (i = 0; i < 8; ++i) { rk_perm[i] = i; } det = 1.0000000000000000e+00; for( i=0; i < (7); i++ ) { indexMax = i; valueMax = fabs(A[i*8+i]); for( j=(i+1); j < 8; j++ ) { temp = fabs(A[j*8+i]); if( temp > valueMax ) { indexMax = j; valueMax = temp; } } if( indexMax > i ) { for (k = 0; k < 8; ++k) { rk_dim8_swap = A[i*8+k]; A[i*8+k] = A[indexMax*8+k]; A[indexMax*8+k] = rk_dim8_swap; } rk_dim8_swap = b[i]; b[i] = b[indexMax]; b[indexMax] = rk_dim8_swap; intSwap = rk_perm[i]; rk_perm[i] = rk_perm[indexMax]; rk_perm[indexMax] = intSwap; } det *= A[i*8+i]; for( j=i+1; j < 8; j++ ) { A[j*8+i] = -A[j*8+i]/A[i*8+i]; for( k=i+1; k < 8; k++ ) { A[j*8+k] += A[j*8+i] * A[i*8+k]; } b[j] += A[j*8+i] * b[i]; } } det *= A[63]; det = fabs(det); acado_solve_dim8_triangular( A, b ); return 
det; } void acado_solve_dim8_system_reuse( real_t* const A, real_t* const b, int* const rk_perm ) { rk_dim8_bPerm[0] = b[rk_perm[0]]; rk_dim8_bPerm[1] = b[rk_perm[1]]; rk_dim8_bPerm[2] = b[rk_perm[2]]; rk_dim8_bPerm[3] = b[rk_perm[3]]; rk_dim8_bPerm[4] = b[rk_perm[4]]; rk_dim8_bPerm[5] = b[rk_perm[5]]; rk_dim8_bPerm[6] = b[rk_perm[6]]; rk_dim8_bPerm[7] = b[rk_perm[7]]; rk_dim8_bPerm[1] += A[8]*rk_dim8_bPerm[0]; rk_dim8_bPerm[2] += A[16]*rk_dim8_bPerm[0]; rk_dim8_bPerm[2] += A[17]*rk_dim8_bPerm[1]; rk_dim8_bPerm[3] += A[24]*rk_dim8_bPerm[0]; rk_dim8_bPerm[3] += A[25]*rk_dim8_bPerm[1]; rk_dim8_bPerm[3] += A[26]*rk_dim8_bPerm[2]; rk_dim8_bPerm[4] += A[32]*rk_dim8_bPerm[0]; rk_dim8_bPerm[4] += A[33]*rk_dim8_bPerm[1]; rk_dim8_bPerm[4] += A[34]*rk_dim8_bPerm[2]; rk_dim8_bPerm[4] += A[35]*rk_dim8_bPerm[3]; rk_dim8_bPerm[5] += A[40]*rk_dim8_bPerm[0]; rk_dim8_bPerm[5] += A[41]*rk_dim8_bPerm[1]; rk_dim8_bPerm[5] += A[42]*rk_dim8_bPerm[2]; rk_dim8_bPerm[5] += A[43]*rk_dim8_bPerm[3]; rk_dim8_bPerm[5] += A[44]*rk_dim8_bPerm[4]; rk_dim8_bPerm[6] += A[48]*rk_dim8_bPerm[0]; rk_dim8_bPerm[6] += A[49]*rk_dim8_bPerm[1]; rk_dim8_bPerm[6] += A[50]*rk_dim8_bPerm[2]; rk_dim8_bPerm[6] += A[51]*rk_dim8_bPerm[3]; rk_dim8_bPerm[6] += A[52]*rk_dim8_bPerm[4]; rk_dim8_bPerm[6] += A[53]*rk_dim8_bPerm[5]; rk_dim8_bPerm[7] += A[56]*rk_dim8_bPerm[0]; rk_dim8_bPerm[7] += A[57]*rk_dim8_bPerm[1]; rk_dim8_bPerm[7] += A[58]*rk_dim8_bPerm[2]; rk_dim8_bPerm[7] += A[59]*rk_dim8_bPerm[3]; rk_dim8_bPerm[7] += A[60]*rk_dim8_bPerm[4]; rk_dim8_bPerm[7] += A[61]*rk_dim8_bPerm[5]; rk_dim8_bPerm[7] += A[62]*rk_dim8_bPerm[6]; acado_solve_dim8_triangular( A, rk_dim8_bPerm ); b[0] = rk_dim8_bPerm[0]; b[1] = rk_dim8_bPerm[1]; b[2] = rk_dim8_bPerm[2]; b[3] = rk_dim8_bPerm[3]; b[4] = rk_dim8_bPerm[4]; b[5] = rk_dim8_bPerm[5]; b[6] = rk_dim8_bPerm[6]; b[7] = rk_dim8_bPerm[7]; } /** Matrix of size: 2 x 2 (row major format) */ static const real_t acado_Ah_mat[ 4 ] = { 6.2500000000000003e-03, 1.3466878364870323e-02, -9.6687836487032168e-04, 6.2500000000000003e-03 }; /* Fixed step size:0.025 */ int acado_integrate( real_t* const rk_eta, int resetIntegrator ) { int error; int i; int j; int k; int run; int run1; int tmp_index1; int tmp_index2; real_t det; rk_ttt = 0.0000000000000000e+00; rk_xxx[4] = rk_eta[28]; rk_xxx[5] = rk_eta[29]; for (run = 0; run < 4; ++run) { if( run > 0 ) { for (i = 0; i < 4; ++i) { rk_diffsPrev2[i * 6] = rk_eta[i * 4 + 4]; rk_diffsPrev2[i * 6 + 1] = rk_eta[i * 4 + 5]; rk_diffsPrev2[i * 6 + 2] = rk_eta[i * 4 + 6]; rk_diffsPrev2[i * 6 + 3] = rk_eta[i * 4 + 7]; rk_diffsPrev2[i * 6 + 4] = rk_eta[i * 2 + 20]; rk_diffsPrev2[i * 6 + 5] = rk_eta[i * 2 + 21]; } } if( resetIntegrator ) { for (i = 0; i < 1; ++i) { for (run1 = 0; run1 < 2; ++run1) { for (j = 0; j < 4; ++j) { rk_xxx[j] = rk_eta[j]; tmp_index1 = j; rk_xxx[j] += + acado_Ah_mat[run1 * 2]*rk_kkk[tmp_index1 * 2]; rk_xxx[j] += + acado_Ah_mat[run1 * 2 + 1]*rk_kkk[tmp_index1 * 2 + 1]; } acado_diffs( rk_xxx, &(rk_diffsTemp2[ run1 * 24 ]) ); for (j = 0; j < 4; ++j) { tmp_index1 = (run1 * 4) + (j); rk_A[tmp_index1 * 8] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 24) + (j * 6)]; rk_A[tmp_index1 * 8 + 1] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 24) + (j * 6 + 1)]; rk_A[tmp_index1 * 8 + 2] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 24) + (j * 6 + 2)]; rk_A[tmp_index1 * 8 + 3] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 24) + (j * 6 + 3)]; if( 0 == run1 ) rk_A[(tmp_index1 * 8) + (j)] -= 1.0000000000000000e+00; rk_A[tmp_index1 * 8 + 4] = + acado_Ah_mat[run1 * 2 + 
1]*rk_diffsTemp2[(run1 * 24) + (j * 6)]; rk_A[tmp_index1 * 8 + 5] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 24) + (j * 6 + 1)]; rk_A[tmp_index1 * 8 + 6] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 24) + (j * 6 + 2)]; rk_A[tmp_index1 * 8 + 7] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 24) + (j * 6 + 3)]; if( 1 == run1 ) rk_A[(tmp_index1 * 8) + (j + 4)] -= 1.0000000000000000e+00; } acado_rhs( rk_xxx, rk_rhsTemp ); rk_b[run1 * 4] = rk_kkk[run1] - rk_rhsTemp[0]; rk_b[run1 * 4 + 1] = rk_kkk[run1 + 2] - rk_rhsTemp[1]; rk_b[run1 * 4 + 2] = rk_kkk[run1 + 4] - rk_rhsTemp[2]; rk_b[run1 * 4 + 3] = rk_kkk[run1 + 6] - rk_rhsTemp[3]; } det = acado_solve_dim8_system( rk_A, rk_b, rk_dim8_perm ); for (j = 0; j < 2; ++j) { rk_kkk[j] += rk_b[j * 4]; rk_kkk[j + 2] += rk_b[j * 4 + 1]; rk_kkk[j + 4] += rk_b[j * 4 + 2]; rk_kkk[j + 6] += rk_b[j * 4 + 3]; } } } for (i = 0; i < 5; ++i) { for (run1 = 0; run1 < 2; ++run1) { for (j = 0; j < 4; ++j) { rk_xxx[j] = rk_eta[j]; tmp_index1 = j; rk_xxx[j] += + acado_Ah_mat[run1 * 2]*rk_kkk[tmp_index1 * 2]; rk_xxx[j] += + acado_Ah_mat[run1 * 2 + 1]*rk_kkk[tmp_index1 * 2 + 1]; } acado_rhs( rk_xxx, rk_rhsTemp ); rk_b[run1 * 4] = rk_kkk[run1] - rk_rhsTemp[0]; rk_b[run1 * 4 + 1] = rk_kkk[run1 + 2] - rk_rhsTemp[1]; rk_b[run1 * 4 + 2] = rk_kkk[run1 + 4] - rk_rhsTemp[2]; rk_b[run1 * 4 + 3] = rk_kkk[run1 + 6] - rk_rhsTemp[3]; } acado_solve_dim8_system_reuse( rk_A, rk_b, rk_dim8_perm ); for (j = 0; j < 2; ++j) { rk_kkk[j] += rk_b[j * 4]; rk_kkk[j + 2] += rk_b[j * 4 + 1]; rk_kkk[j + 4] += rk_b[j * 4 + 2]; rk_kkk[j + 6] += rk_b[j * 4 + 3]; } } for (run1 = 0; run1 < 2; ++run1) { for (j = 0; j < 4; ++j) { rk_xxx[j] = rk_eta[j]; tmp_index1 = j; rk_xxx[j] += + acado_Ah_mat[run1 * 2]*rk_kkk[tmp_index1 * 2]; rk_xxx[j] += + acado_Ah_mat[run1 * 2 + 1]*rk_kkk[tmp_index1 * 2 + 1]; } acado_diffs( rk_xxx, &(rk_diffsTemp2[ run1 * 24 ]) ); for (j = 0; j < 4; ++j) { tmp_index1 = (run1 * 4) + (j); rk_A[tmp_index1 * 8] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 24) + (j * 6)]; rk_A[tmp_index1 * 8 + 1] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 24) + (j * 6 + 1)]; rk_A[tmp_index1 * 8 + 2] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 24) + (j * 6 + 2)]; rk_A[tmp_index1 * 8 + 3] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 24) + (j * 6 + 3)]; if( 0 == run1 ) rk_A[(tmp_index1 * 8) + (j)] -= 1.0000000000000000e+00; rk_A[tmp_index1 * 8 + 4] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 24) + (j * 6)]; rk_A[tmp_index1 * 8 + 5] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 24) + (j * 6 + 1)]; rk_A[tmp_index1 * 8 + 6] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 24) + (j * 6 + 2)]; rk_A[tmp_index1 * 8 + 7] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 24) + (j * 6 + 3)]; if( 1 == run1 ) rk_A[(tmp_index1 * 8) + (j + 4)] -= 1.0000000000000000e+00; } } for (run1 = 0; run1 < 4; ++run1) { for (i = 0; i < 2; ++i) { rk_b[i * 4] = - rk_diffsTemp2[(i * 24) + (run1)]; rk_b[i * 4 + 1] = - rk_diffsTemp2[(i * 24) + (run1 + 6)]; rk_b[i * 4 + 2] = - rk_diffsTemp2[(i * 24) + (run1 + 12)]; rk_b[i * 4 + 3] = - rk_diffsTemp2[(i * 24) + (run1 + 18)]; } if( 0 == run1 ) { det = acado_solve_dim8_system( rk_A, rk_b, rk_dim8_perm ); } else { acado_solve_dim8_system_reuse( rk_A, rk_b, rk_dim8_perm ); } for (i = 0; i < 2; ++i) { rk_diffK[i] = rk_b[i * 4]; rk_diffK[i + 2] = rk_b[i * 4 + 1]; rk_diffK[i + 4] = rk_b[i * 4 + 2]; rk_diffK[i + 6] = rk_b[i * 4 + 3]; } for (i = 0; i < 4; ++i) { rk_diffsNew2[(i * 6) + (run1)] = (i == run1-0); rk_diffsNew2[(i * 6) + 
(run1)] += + rk_diffK[i * 2]*(real_t)1.2500000000000001e-02 + rk_diffK[i * 2 + 1]*(real_t)1.2500000000000001e-02; } } for (run1 = 0; run1 < 2; ++run1) { for (i = 0; i < 2; ++i) { for (j = 0; j < 4; ++j) { tmp_index1 = (i * 4) + (j); tmp_index2 = (run1) + (j * 6); rk_b[tmp_index1] = - rk_diffsTemp2[(i * 24) + (tmp_index2 + 4)]; } } acado_solve_dim8_system_reuse( rk_A, rk_b, rk_dim8_perm ); for (i = 0; i < 2; ++i) { rk_diffK[i] = rk_b[i * 4]; rk_diffK[i + 2] = rk_b[i * 4 + 1]; rk_diffK[i + 4] = rk_b[i * 4 + 2]; rk_diffK[i + 6] = rk_b[i * 4 + 3]; } for (i = 0; i < 4; ++i) { rk_diffsNew2[(i * 6) + (run1 + 4)] = + rk_diffK[i * 2]*(real_t)1.2500000000000001e-02 + rk_diffK[i * 2 + 1]*(real_t)1.2500000000000001e-02; } } rk_eta[0] += + rk_kkk[0]*(real_t)1.2500000000000001e-02 + rk_kkk[1]*(real_t)1.2500000000000001e-02; rk_eta[1] += + rk_kkk[2]*(real_t)1.2500000000000001e-02 + rk_kkk[3]*(real_t)1.2500000000000001e-02; rk_eta[2] += + rk_kkk[4]*(real_t)1.2500000000000001e-02 + rk_kkk[5]*(real_t)1.2500000000000001e-02; rk_eta[3] += + rk_kkk[6]*(real_t)1.2500000000000001e-02 + rk_kkk[7]*(real_t)1.2500000000000001e-02; if( run == 0 ) { for (i = 0; i < 4; ++i) { for (j = 0; j < 4; ++j) { tmp_index2 = (j) + (i * 4); rk_eta[tmp_index2 + 4] = rk_diffsNew2[(i * 6) + (j)]; } for (j = 0; j < 2; ++j) { tmp_index2 = (j) + (i * 2); rk_eta[tmp_index2 + 20] = rk_diffsNew2[(i * 6) + (j + 4)]; } } } else { for (i = 0; i < 4; ++i) { for (j = 0; j < 4; ++j) { tmp_index2 = (j) + (i * 4); rk_eta[tmp_index2 + 4] = + rk_diffsNew2[i * 6]*rk_diffsPrev2[j]; rk_eta[tmp_index2 + 4] += + rk_diffsNew2[i * 6 + 1]*rk_diffsPrev2[j + 6]; rk_eta[tmp_index2 + 4] += + rk_diffsNew2[i * 6 + 2]*rk_diffsPrev2[j + 12]; rk_eta[tmp_index2 + 4] += + rk_diffsNew2[i * 6 + 3]*rk_diffsPrev2[j + 18]; } for (j = 0; j < 2; ++j) { tmp_index2 = (j) + (i * 2); rk_eta[tmp_index2 + 20] = rk_diffsNew2[(i * 6) + (j + 4)]; rk_eta[tmp_index2 + 20] += + rk_diffsNew2[i * 6]*rk_diffsPrev2[j + 4]; rk_eta[tmp_index2 + 20] += + rk_diffsNew2[i * 6 + 1]*rk_diffsPrev2[j + 10]; rk_eta[tmp_index2 + 20] += + rk_diffsNew2[i * 6 + 2]*rk_diffsPrev2[j + 16]; rk_eta[tmp_index2 + 20] += + rk_diffsNew2[i * 6 + 3]*rk_diffsPrev2[j + 22]; } } } resetIntegrator = 0; rk_ttt += 2.5000000000000000e-01; } for (i = 0; i < 4; ++i) { } if( det < 1e-12 ) { error = 2; } else if( det < 1e-6 ) { error = 1; } else { error = 0; } return error; }
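
/*
 * Illustrative sketch only (not generated by ACADO): one way the exported
 * acado_integrate() above could be driven for a single interval.  The rk_eta
 * layout is inferred from the code above, not stated by it: entries 0..3 hold
 * the four states, 4..27 are overwritten with state/control sensitivities, and
 * 28..29 carry the two control inputs.  Reading the states as x, y, v, heading
 * is an interpretation of the bicycle-like right-hand side, not a given.
 */
static int integrate_one_interval_sketch(void)
{
	real_t rk_eta[ 30 ];
	int i;

	for (i = 0; i < 30; ++i)
		rk_eta[ i ] = 0.0;

	/* Initial state (interpreted as x, y, v, heading). */
	rk_eta[ 0 ] = 0.0;
	rk_eta[ 1 ] = 0.0;
	rk_eta[ 2 ] = 1.0;
	rk_eta[ 3 ] = 0.0;

	/* Controls (interpreted as acceleration and steering angle). */
	rk_eta[ 28 ] = 0.5;
	rk_eta[ 29 ] = 0.1;

	/* resetIntegrator = 1 forces a fresh Newton factorization on the first step. */
	return acado_integrate( rk_eta, 1 );
}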
softmax-inl.h
#include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * \file softmax-inl.h * \brief */ #ifndef MXNET_OPERATOR_NN_SOFTMAX_INL_H_ #define MXNET_OPERATOR_NN_SOFTMAX_INL_H_ #include <algorithm> #include <string> #include <utility> #include <vector> #include "../mxnet_op.h" #include "../operator_common.h" #include "../tensor/broadcast_reduce_op.h" namespace mxnet { namespace op { namespace mxnet_op { struct softmax_fwd { template<typename AType> MSHADOW_XINLINE static AType Map(float a, AType b) { return AType(expf(a)/b); } template<typename AType> MSHADOW_XINLINE static AType Map(double a, AType b) { return AType(exp(a)/b); } }; struct log_softmax_fwd { template<typename DType> MSHADOW_XINLINE static float Map(DType a, float b) { return a - logf(b); } template<typename DType> MSHADOW_XINLINE static double Map(DType a, double b) { return a - log(b); } }; template<typename OP, bool negate, typename AType, typename DType, typename OType, int ndim> inline void Softmax(Stream<cpu> *s, DType *in, OType *out, Shape<ndim> shape, int axis, const DType temperature) { index_t M = shape[axis]; index_t N = shape.Size()/M; Shape<ndim> stride = calc_stride(shape); Shape<ndim> sshape = shape; sshape[axis] = 1; index_t sa = stride[axis]; #pragma omp parallel for for (int i = 0; i < static_cast<int>(N); ++i) { index_t base = unravel_dot(i, sshape, stride); DType mmax = negate ? -in[base] : in[base]; DType val; for (index_t j = 1; j < M; ++j) { val = negate ? -in[base + j*sa] : in[base + j*sa]; if (mmax < val) mmax = val; } AType sum = AType(0); DType in_val; // By default temperature is 1.0. // Adding a branch here to save the CPU 'divide-by-1' computation at runtime if (temperature == 1.0) { for (index_t j = 0; j < M; ++j) { in_val = negate ? -in[base + j*sa] : in[base + j*sa]; sum += std::exp(in_val - mmax); } for (index_t j = 0; j < M; ++j) { in_val = negate ? -in[base + j*sa] : in[base + j*sa]; out[base + j*sa] = OP::Map(in_val - mmax, sum); } } else { for (index_t j = 0; j < M; ++j) { in_val = negate ? -in[base + j*sa] : in[base + j*sa]; sum += std::exp((in_val - mmax)/temperature); } for (index_t j = 0; j < M; ++j) { in_val = negate ? 
-in[base + j*sa] : in[base + j*sa]; out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum); } } } } struct softmax_bwd { template<typename DType, typename AType> MSHADOW_XINLINE static AType Map(DType ograd, DType out, AType sum) { return AType(out * (ograd - sum)); } }; struct log_softmax_bwd { template<typename AType> MSHADOW_XINLINE static AType Map(float ograd, float out, AType sum) { return AType(ograd - expf(out)*sum); } template<typename AType> MSHADOW_XINLINE static AType Map(double ograd, double out, AType sum) { return AType(ograd - exp(out)*sum); } }; template<typename OP1, typename OP2, int Req, bool negate, typename AType, typename DType, typename OType, int ndim> inline void SoftmaxGrad(Stream<cpu> *s, OType *out, OType *ograd, DType *igrad, Shape<ndim> shape, int axis, const DType temperature) { index_t M = shape[axis]; index_t N = shape.Size()/M; Shape<ndim> stride = calc_stride(shape); Shape<ndim> sshape = shape; sshape[axis] = 1; index_t sa = stride[axis]; #pragma omp parallel for for (int i = 0; i < static_cast<int>(N); ++i) { index_t base = unravel_dot(i, sshape, stride); AType sum = AType(0); for (index_t j = 0; j < M; ++j) { sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]); } // By default temperature is 1.0. // Adding a branch here to save the CPU 'divide-by-1' computation at runtime DType final_result; if (temperature == 1.0) { for (index_t j = 0; j < M; ++j) { final_result = negate ? -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) : OP2::Map(ograd[base + j*sa], out[base + j*sa], sum); KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result); } } else { for (index_t j = 0; j < M; ++j) { final_result = negate ? -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature : OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature; KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result); } } } } #ifdef __HIPCC__ template<int x_bits, typename OP, bool negate, typename AType, int ndim, typename DType, typename OType> __global__ void softmax_compute_kernel(DType *in, OType *out, index_t M, int axis, Shape<ndim> sshape, Shape<ndim> stride, const double temperature) { const unsigned x_size = 1 << x_bits; __shared__ AType smem[x_size]; index_t sa = stride[axis]; index_t base = unravel_dot(blockIdx.x, sshape, stride); index_t x = threadIdx.x; red::maximum::SetInitValue(smem[x]); for (index_t i = x; i < M; i += x_size) { smem[x] = ::max(smem[x], negate ? -in[base + i*sa] : in[base + i*sa]); } __syncthreads(); cuda::Reduce1D<red::maximum, x_bits>(smem); __syncthreads(); DType smax = smem[0]; __syncthreads(); red::sum::SetInitValue(smem[x]); DType val; for (index_t i = x; i < M; i += x_size) { val = negate ? -in[base + i*sa]:in[base + i*sa]; smem[x] += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature))); } __syncthreads(); cuda::Reduce1D<red::sum, x_bits>(smem); __syncthreads(); AType ssum = smem[0]; __syncthreads(); for (index_t i = x; i < M; i += x_size) { val = negate ? 
-in[base + i*sa] : in[base + i*sa]; out[base + i*sa] = OP::Map((val - smax)/static_cast<DType>(temperature), ssum); } } template<typename OP, bool negate, typename AType, typename DType, typename OType, int ndim> inline void Softmax(Stream<gpu> *s, DType *in, OType *out, Shape<ndim> shape, int axis, const double temperature) { const int x_bits = 7; const int x_size = 1 << x_bits; index_t M = shape[axis]; index_t N = shape.Size()/M; Shape<ndim> stride = calc_stride(shape); Shape<ndim> sshape = shape; sshape[axis] = 1; hipLaunchKernelGGL((softmax_compute_kernel<x_bits, OP, negate, AType, ndim>), dim3(N), dim3(x_size), 0, mshadow::Stream<gpu>::GetStream(s), in, out, M, axis, sshape, stride, temperature); MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_compute_kernel); } template<int x_bits, typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim, typename DType, typename OType> __global__ void softmax_gradient_kernel(OType *out, OType *ograd, DType *igrad, index_t M, int axis, Shape<ndim> sshape, Shape<ndim> stride, const double temperature) { const unsigned x_size = 1 << x_bits; __shared__ AType smem[x_size]; index_t sa = stride[axis]; index_t base = unravel_dot(blockIdx.x, sshape, stride); index_t x = threadIdx.x; red::sum::SetInitValue(smem[x]); for (index_t i = x; i < M; i += x_size) { smem[x] += OP1::Map(ograd[base + i*sa], out[base + i*sa]); } __syncthreads(); cuda::Reduce1D<red::sum, x_bits>(smem); __syncthreads(); AType ssum = smem[0]; __syncthreads(); DType final_result; for (index_t i = x; i < M; i += x_size) { final_result = negate ? -OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum) : OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum); KERNEL_ASSIGN(igrad[base + i*sa], Req, final_result / static_cast<DType>(temperature)); } } template<typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim, typename DType, typename OType> inline void SoftmaxGrad(Stream<gpu> *s, OType *out, OType *ograd, DType *igrad, Shape<ndim> shape, int axis, const double temperature) { const int x_bits = 7; const int x_size = 1 << x_bits; index_t M = shape[axis]; index_t N = shape.Size()/M; Shape<ndim> stride = calc_stride(shape); Shape<ndim> sshape = shape; sshape[axis] = 1; hipLaunchKernelGGL((softmax_gradient_kernel<x_bits, OP1, OP2, Req, negate, AType, ndim>), dim3(N), dim3(x_size), 0, mshadow::Stream<gpu>::GetStream(s), out, ograd, igrad, M, axis, sshape, stride, temperature); MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_gradient_kernel); } #endif } // namespace mxnet_op struct SoftmaxParam : public dmlc::Parameter<SoftmaxParam> { int axis; dmlc::optional<double> temperature; dmlc::optional<int> dtype; DMLC_DECLARE_PARAMETER(SoftmaxParam) { DMLC_DECLARE_FIELD(axis).set_default(-1) .describe("The axis along which to compute softmax."); DMLC_DECLARE_FIELD(temperature).set_default(dmlc::optional<double>()) .describe("Temperature parameter in softmax"); DMLC_DECLARE_FIELD(dtype) .add_enum("float16", mshadow::kFloat16) .add_enum("float32", mshadow::kFloat32) .add_enum("float64", mshadow::kFloat64) .set_default(dmlc::optional<int>()) .describe("DType of the output in case this can't be inferred. 
" "Defaults to the same as input's dtype if not defined (dtype=None)."); } }; static inline bool softmax_has_dtype_override(const nnvm::NodeAttrs& attrs) { const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); return param.dtype.has_value() && param.dtype.value() != -1; } static inline bool SoftmaxOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(in_attrs->size(), 1); CHECK_EQ(out_attrs->size(), 1); const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); if (softmax_has_dtype_override(attrs)) { TYPE_ASSIGN_CHECK(*out_attrs, 0, param.dtype.value()); type_assign(&(*in_attrs)[0], (*out_attrs)[0]); return true; } else { return ElemwiseType<1, 1>(attrs, in_attrs, out_attrs); } } static inline bool SoftmaxGradOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { if (softmax_has_dtype_override(attrs)) { return ElemwiseShape<3, 1>(attrs, in_attrs, out_attrs); } else { return ElemwiseShape<2, 1>(attrs, in_attrs, out_attrs); } } static inline bool SoftmaxGradOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(out_attrs->size(), 1); if (softmax_has_dtype_override(attrs)) { CHECK_EQ(in_attrs->size(), 3); int in_dtype = (*in_attrs)[1]; int out_dtype = (*in_attrs)[2]; TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype); TYPE_ASSIGN_CHECK(*out_attrs, 0, in_dtype); return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1; } else { CHECK_EQ(in_attrs->size(), 2); int out_dtype = (*in_attrs)[1]; TYPE_ASSIGN_CHECK(*out_attrs, 0, out_dtype); TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype); return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1; } } static inline std::vector<std::pair<int, int> > SoftmaxGradOpInplaceOption(const nnvm::NodeAttrs& attrs) { if (softmax_has_dtype_override(attrs)) { return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}, {2, 0}}; } else { return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; } } static inline uint32_t SoftmaxGradOpNumInputs(const nnvm::NodeAttrs& attrs) { return softmax_has_dtype_override(attrs) ? 3 : 2; } static inline std::vector<std::string> SoftmaxGradOpInputNames(const nnvm::NodeAttrs& attrs) { if (softmax_has_dtype_override(attrs)) { return std::vector<std::string>{"ograd", "data", "output"}; } else { return std::vector<std::string>{"ograd", "output"}; } } struct SoftmaxFGradient { const char *op_name; std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) const { if (softmax_has_dtype_override(n->attrs)) { return ElemwiseGradUseInOut {op_name}(n, ograds); } else { return ElemwiseGradUseOut {op_name}(n, ograds); } } }; template<typename xpu, typename OP, bool negate = false> void SoftmaxCompute(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mxnet_op; if (req[0] == kNullOp) return; CHECK_NE(req[0], kAddTo); const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); int axis = CheckAxis(param.axis, inputs[0].ndim()); const double temperature = param.temperature.has_value() ? 
param.temperature.value() : 1.0; mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true); MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, { MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, { if (shape.ndim() == 2) { Softmax<OP, negate, AType>( ctx.get_stream<xpu>(), inputs[0].dptr<DType>(), outputs[0].dptr<OType>(), shape.get<2>(), axis, static_cast<DType>(temperature)); } else { Softmax<OP, negate, AType>( ctx.get_stream<xpu>(), inputs[0].dptr<DType>(), outputs[0].dptr<OType>(), shape.get<3>(), axis, static_cast<DType>(temperature)); } }); }); } template<typename xpu, typename OP1, typename OP2, bool negate = false> void SoftmaxGradCompute(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mxnet_op; if (req[0] == kNullOp) return; const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); int axis = CheckAxis(param.axis, inputs[0].ndim()); const double temperature = param.temperature.has_value() ? param.temperature.value() : 1.0; mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true); int out_idx = softmax_has_dtype_override(attrs) ? 2 : 1; MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, OType, AType, { MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { if (shape.ndim() == 2) { SoftmaxGrad<OP1, OP2, Req, negate, AType>( ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(), inputs[0].dptr<OType>(), outputs[0].dptr<DType>(), shape.get<2>(), axis, static_cast<DType>(temperature)); } else { SoftmaxGrad<OP1, OP2, Req, negate, AType>( ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(), inputs[0].dptr<OType>(), outputs[0].dptr<DType>(), shape.get<3>(), axis, static_cast<DType>(temperature)); } }); }); }); } } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_NN_SOFTMAX_INL_H_
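
/*
 * Standalone illustration (not part of MXNet): the same numerically stable
 * softmax that the Softmax kernel above computes along one axis, written for a
 * single contiguous row in plain C.  Subtracting the row maximum before
 * exponentiating keeps expf() from overflowing, and the temperature divides the
 * shifted logits exactly as in the temperature branch above.
 */
#include <math.h>
#include <stddef.h>

static void softmax_row_sketch(const float *in, float *out, size_t n, float temperature)
{
  size_t j;
  float mmax = in[0];
  float sum = 0.0f;

  /* Pass 1: row maximum. */
  for (j = 1; j < n; ++j)
    if (in[j] > mmax) mmax = in[j];

  /* Pass 2: shifted exponentials and their sum. */
  for (j = 0; j < n; ++j) {
    out[j] = expf((in[j] - mmax) / temperature);
    sum += out[j];
  }

  /* Pass 3: normalize so the row sums to 1. */
  for (j = 0; j < n; ++j)
    out[j] /= sum;
}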
GB_unop__identity_uint32_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint32_fc64) // op(A') function: GB (_unop_tran__identity_uint32_fc64) // C type: uint32_t // A type: GxB_FC64_t // cast: uint32_t cij = GB_cast_to_uint32_t (creal (aij)) // unaryop: cij = aij #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint32_t z = GB_cast_to_uint32_t (creal (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint32_t z = GB_cast_to_uint32_t (creal (aij)) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint32_fc64) ( uint32_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; uint32_t z = GB_cast_to_uint32_t (creal (aij)) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; uint32_t z = GB_cast_to_uint32_t (creal (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint32_fc64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
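
//------------------------------------------------------------------------------
// Illustration only (not part of SuiteSparse:GraphBLAS): the typecast used by
// the kernel above keeps just the real part of each GxB_FC64_t entry and then
// converts it to uint32_t.  The sketch below mimics that with the standard C99
// complex type; the real GB_cast_to_uint32_t may additionally handle NaN and
// out-of-range values, which is not reproduced here.
//------------------------------------------------------------------------------

#include <complex.h>
#include <stdint.h>
#include <stdio.h>

int cast_demo_sketch (void)
{
    double complex aij = 3.7 - 2.0*I ;
    uint32_t cij = (uint32_t) creal (aij) ;   /* imaginary part is dropped */
    printf ("creal = %g -> cij = %u\n", creal (aij), cij) ;
    return (0) ;
}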
engine.c
#include "mana/core/engine.h" int engine_init(struct Engine* engine, struct EngineSettings engine_settings) { engine->engine_settings = engine_settings; const char* graphics_lbrary_extensions[MAX_GRAPHICS_LIBRARY_EXTENSIONS] = {0}; uint32_t graphics_library_extension_count; int graphics_library_error = graphics_library_init(&engine->graphics_library, engine_settings.graphics_library_type, graphics_lbrary_extensions, &graphics_library_extension_count); switch (graphics_library_error) { case (GRAPHICS_LIBRARY_SUCCESS): break; case (GRAPHICS_LIBRARY_GLFW_ERROR): fprintf(stderr, "Failed to setup glfw for engine!\n"); return ENGINE_GRAPHICS_LIBRARY_ERROR; default: fprintf(stderr, "Unknown graphics library error! Error code: %d\n", graphics_library_error); return ENGINE_GRAPHICS_LIBRARY_ERROR; } int gpu_api_error = gpu_api_init(&engine->gpu_api, engine_settings.gpu_api_type, &engine->graphics_library, graphics_lbrary_extensions, &graphics_library_extension_count); switch (gpu_api_error) { case (GPU_API_SUCCESS): break; case (GPU_API_VULKAN_ERROR): fprintf(stderr, "Failed to setup Vulkan API for engine!\n"); return ENGINE_GPU_API_ERROR; default: fprintf(stderr, "Unknown gpu api error! Error code: %d\n", gpu_api_error); return ENGINE_GPU_API_ERROR; } return ENGINE_SUCCESS; } void engine_delete(struct Engine* engine) { gpu_api_delete(&engine->gpu_api); graphics_library_delete(&engine->graphics_library); } double engine_get_time() { struct timespec current_time; timespec_get(&current_time, TIME_UTC); double time = (double)current_time.tv_sec + (double)current_time.tv_nsec / 1000000000; return time; } int engine_get_max_omp_threads() { int max_omp_threads = 1; #pragma omp parallel { #pragma omp single max_omp_threads = omp_get_num_threads(); } return max_omp_threads; }
ast-dump-openmp-teams-distribute-parallel-for-simd.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp target #pragma omp teams distribute parallel for simd for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp target #pragma omp teams distribute parallel for simd for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp target #pragma omp teams distribute parallel for simd collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp target #pragma omp teams distribute parallel for simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp target #pragma omp teams distribute parallel for simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:3:1, line:8:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:8:1> // CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:4:1, col:19> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:6:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:1, col:47> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:47> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <col:1, col:47> // CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <col:3, line:7:5> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:5:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <col:3, line:7:5> // CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <col:3, line:7:5> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <col:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <line:5:1, col:47> // CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:7:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <line:5:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &' // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <col:3, line:7:5> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &' // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <col:3, line:7:5> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <col:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:10:1, line:16:1> line:10:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:16:1> // CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:11:1, col:19> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:12:1, col:47> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:47> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <col:1, col:47> // CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | 
`-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:11:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:12:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:15:7> 
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 
'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:13:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 
'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:11:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <line:12:1, col:47> // CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' 
postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:11:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <line:12:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &' // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &' // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var 
{{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &' // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:13:23> col:23 implicit used .capture_expr. 
'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:18:1, line:24:1> line:18:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:24:1> // CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:19:1, col:19> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:20:1, col:59> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:59> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <col:1, col:59> // CHECK-NEXT: | | | | | |-OMPCollapseClause {{.*}} <col:48, col:58> // CHECK-NEXT: | | | | | | `-ConstantExpr {{.*}} <col:57> 'int' // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:57> 'int' 1 // CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | 
| | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:19:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:20:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:23:7> 
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 
'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:21:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 
'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:19:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <line:20:1, col:59> // CHECK-NEXT: | | | |-OMPCollapseClause {{.*}} <col:48, col:58> // CHECK-NEXT: | | | | `-ConstantExpr {{.*}} <col:57> 'int' // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:57> 'int' 1 // CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 
implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | 
`-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:19:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <line:20:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &' // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &' // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | 
|-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &' // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:21:23> col:23 implicit used .capture_expr. 
'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:26:1, line:32:1> line:26:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:32:1> // CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:27:1, col:19> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:28:1, col:59> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:59> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <col:1, col:59> // CHECK-NEXT: | | | | | |-OMPCollapseClause {{.*}} <col:48, col:58> // CHECK-NEXT: | | | | | | `-ConstantExpr {{.*}} <col:57> 'int' // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:57> 'int' 2 // CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | 
| | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:27:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:28:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:31:7> 
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 
'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:29:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:30:25> col:25 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <line:29:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, line:30:28> 'long' '*' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <line:29:3, col:26> 'long' <IntegralCast> // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <line:30:5, col:28> 'long' <IntegralCast> // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-' // CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast> // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:27:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <line:28:1, col:59> // CHECK-NEXT: | | | |-OMPCollapseClause {{.*}} <col:48, col:58> // CHECK-NEXT: | | | | `-ConstantExpr {{.*}} <col:57> 'int' // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:57> 'int' 2 // CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} 
<col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:27:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <line:28:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &' // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &' // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr 
{{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &' // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:29:23> col:23 implicit used .capture_expr. 
'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:30:25> col:25 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <line:29:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long' // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-' // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, line:30:28> 'long' '*' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <line:29:3, col:26> 'long' <IntegralCast> // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <line:30:5, col:28> 'long' <IntegralCast> // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-' // CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast> // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:34:1, line:41:1> line:34:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:41:1> // CHECK-NEXT: `-OMPTargetDirective {{.*}} <line:35:1, col:19> // CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: `-CapturedStmt {{.*}} <line:36:1, col:59> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <col:1, col:59> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <col:1, col:59> // CHECK-NEXT: | | | | |-OMPCollapseClause {{.*}} <col:48, col:58> // CHECK-NEXT: | | | | | `-ConstantExpr {{.*}} <col:57> 'int' // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:57> 'int' 2 // CHECK-NEXT: | | | | `-CapturedStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | 
| | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &' // CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | 
|-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:35:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <line:36:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &' // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:38:10, 
col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &' // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:40:9> // 
CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | |-OMPCapturedExprDecl {{.*}} <line:37:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-OMPCapturedExprDecl {{.*}} <line:38:25> col:25 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | `-OMPCapturedExprDecl {{.*}} <line:37:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long' // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-' // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:3, line:38:28> 'long' '*' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <line:37:3, col:26> 'long' <IntegralCast> // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <line:38:5, col:28> 'long' <IntegralCast> // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/' // CHECK-NEXT: | | | | |-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | `-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-' // CHECK-NEXT: | | | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast> // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:35:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <line:36:1, col:59> // CHECK-NEXT: | | |-OMPCollapseClause {{.*}} <col:48, col:58> // CHECK-NEXT: | | | `-ConstantExpr {{.*}} <col:57> 'int' // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:57> 'int' 2 // CHECK-NEXT: | | `-CapturedStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 
'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &' // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, 
col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:35:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <line:36:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &' // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &' // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &' // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | 
| | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &' // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &' // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &' // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' 
lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | |-OMPCapturedExprDecl {{.*}} <line:37:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-OMPCapturedExprDecl {{.*}} <line:38:25> col:25 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-OMPCapturedExprDecl {{.*}} <line:37:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long' // CHECK-NEXT: | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-' // CHECK-NEXT: | |-BinaryOperator {{.*}} <col:3, line:38:28> 'long' '*' // CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} <line:37:3, col:26> 'long' <IntegralCast> // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <line:38:5, col:28> 'long' <IntegralCast> // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/' // CHECK-NEXT: | | |-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | `-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-' // CHECK-NEXT: | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast> // CHECK-NEXT: | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
data.h
/*! * Copyright (c) 2015 by Contributors * \file data.h * \brief The input data structure of xgboost. * \author Tianqi Chen */ #ifndef XGBOOST_DATA_H_ #define XGBOOST_DATA_H_ #include <dmlc/base.h> #include <dmlc/data.h> #include <rabit/rabit.h> #include <xgboost/base.h> #include <xgboost/span.h> #include <xgboost/host_device_vector.h> #include <memory> #include <numeric> #include <algorithm> #include <string> #include <utility> #include <vector> namespace xgboost { // forward declare dmatrix. class DMatrix; /*! \brief data type accepted by xgboost interface */ enum class DataType : uint8_t { kFloat32 = 1, kDouble = 2, kUInt32 = 3, kUInt64 = 4 }; /*! * \brief Meta information about dataset, always sit in memory. */ class MetaInfo { public: /*! \brief number of data fields in MetaInfo */ static constexpr uint64_t kNumField = 9; /*! \brief number of rows in the data */ uint64_t num_row_{0}; // NOLINT /*! \brief number of columns in the data */ uint64_t num_col_{0}; // NOLINT /*! \brief number of nonzero entries in the data */ uint64_t num_nonzero_{0}; // NOLINT /*! \brief label of each instance */ HostDeviceVector<bst_float> labels_; // NOLINT /*! * \brief the index of begin and end of a group * needed when the learning task is ranking. */ std::vector<bst_group_t> group_ptr_; // NOLINT /*! \brief weights of each instance, optional */ HostDeviceVector<bst_float> weights_; // NOLINT /*! * \brief initialized margins, * if specified, xgboost will start from this init margin * can be used to specify initial prediction to boost from. */ HostDeviceVector<bst_float> base_margin_; // NOLINT /*! * \brief lower bound of the label, to be used for survival analysis (censored regression) */ HostDeviceVector<bst_float> labels_lower_bound_; // NOLINT /*! * \brief upper bound of the label, to be used for survival analysis (censored regression) */ HostDeviceVector<bst_float> labels_upper_bound_; // NOLINT /*! \brief default constructor */ MetaInfo() = default; MetaInfo& operator=(MetaInfo const& that) { this->num_row_ = that.num_row_; this->num_col_ = that.num_col_; this->num_nonzero_ = that.num_nonzero_; this->labels_.Resize(that.labels_.Size()); this->labels_.Copy(that.labels_); this->group_ptr_ = that.group_ptr_; this->weights_.Resize(that.weights_.Size()); this->weights_.Copy(that.weights_); this->base_margin_.Resize(that.base_margin_.Size()); this->base_margin_.Copy(that.base_margin_); return *this; } /*! * \brief Get weight of each instances. * \param i Instance index. * \return The weight. */ inline bst_float GetWeight(size_t i) const { return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f; } /*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */ inline const std::vector<size_t>& LabelAbsSort() const { if (label_order_cache_.size() == labels_.Size()) { return label_order_cache_; } label_order_cache_.resize(labels_.Size()); std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0); const auto& l = labels_.HostVector(); XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(), [&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);}); return label_order_cache_; } /*! \brief clear all the information */ void Clear(); /*! * \brief Load the Meta info from binary stream. * \param fi The input stream */ void LoadBinary(dmlc::Stream* fi); /*! * \brief Save the Meta info to binary stream * \param fo The output stream. */ void SaveBinary(dmlc::Stream* fo) const; /*! * \brief Set information in the meta info. 
* \param key The key of the information. * \param dptr The data pointer of the source array. * \param dtype The type of the source data. * \param num Number of elements in the source array. */ void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num); /*! * \brief Set information in the meta info with array interface. * \param key The key of the information. * \param interface_str String representation of json format array interface. * * [ column_0, column_1, ... column_n ] * * Right now only 1 column is permitted. */ void SetInfo(const char* key, std::string const& interface_str); private: /*! \brief argsort of labels */ mutable std::vector<size_t> label_order_cache_; }; /*! \brief Element from a sparse vector */ struct Entry { /*! \brief feature index */ bst_feature_t index; /*! \brief feature value */ bst_float fvalue; /*! \brief default constructor */ Entry() = default; /*! * \brief constructor with index and value * \param index The feature or row index. * \param fvalue The feature value. */ XGBOOST_DEVICE Entry(bst_feature_t index, bst_float fvalue) : index(index), fvalue(fvalue) {} /*! \brief reversely compare feature values */ inline static bool CmpValue(const Entry& a, const Entry& b) { return a.fvalue < b.fvalue; } inline bool operator==(const Entry& other) const { return (this->index == other.index && this->fvalue == other.fvalue); } }; /*! * \brief Parameters for constructing batches. */ struct BatchParam { /*! \brief The GPU device to use. */ int gpu_id; /*! \brief Maximum number of bins per feature for histograms. */ int max_bin{0}; /*! \brief Page size for external memory mode. */ size_t gpu_page_size; BatchParam() = default; BatchParam(int32_t device, int32_t max_bin, size_t gpu_page_size = 0) : gpu_id{device}, max_bin{max_bin}, gpu_page_size{gpu_page_size} {} inline bool operator!=(const BatchParam& other) const { return gpu_id != other.gpu_id || max_bin != other.max_bin || gpu_page_size != other.gpu_page_size; } }; /*! * \brief In-memory storage unit of sparse batch, stored in CSR format. */ class SparsePage { public: // Offset for each row. HostDeviceVector<bst_row_t> offset; /*! \brief the data of the segments */ HostDeviceVector<Entry> data; size_t base_rowid{}; /*! \brief an instance of sparse vector in the batch */ using Inst = common::Span<Entry const>; /*! \brief get i-th row from the batch */ inline Inst operator[](size_t i) const { const auto& data_vec = data.HostVector(); const auto& offset_vec = offset.HostVector(); size_t size; // in distributed mode, some partitions may not get any instance for a feature. Therefore // we should set the size as zero if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) { size = 0; } else { size = offset_vec[i + 1] - offset_vec[i]; } return {data_vec.data() + offset_vec[i], static_cast<Inst::index_type>(size)}; } /*! \brief constructor */ SparsePage() { this->Clear(); } /*! \return Number of instances in the page. */ inline size_t Size() const { return offset.Size() == 0 ? 0 : offset.Size() - 1; } /*! \return estimation of memory cost of this page */ inline size_t MemCostBytes() const { return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry); } /*! \brief clear the page */ inline void Clear() { base_rowid = 0; auto& offset_vec = offset.HostVector(); offset_vec.clear(); offset_vec.push_back(0); data.HostVector().clear(); } /*! \brief Set the base row id for this page. 
*/ inline void SetBaseRowId(size_t row_id) { base_rowid = row_id; } SparsePage GetTranspose(int num_columns) const; void SortRows() { auto ncol = static_cast<bst_omp_uint>(this->Size()); #pragma omp parallel for default(none) shared(ncol) schedule(dynamic, 1) for (bst_omp_uint i = 0; i < ncol; ++i) { if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) { std::sort( this->data.HostVector().begin() + this->offset.HostVector()[i], this->data.HostVector().begin() + this->offset.HostVector()[i + 1], Entry::CmpValue); } } } /*! * \brief Push row block into the page. * \param batch the row batch. */ void Push(const dmlc::RowBlock<uint32_t>& batch); /** * \brief Pushes external data batch onto this page * * \tparam AdapterBatchT * \param batch * \param missing * \param nthread * * \return The maximum number of columns encountered in this input batch. Useful when pushing many adapter batches to work out the total number of columns. */ template <typename AdapterBatchT> uint64_t Push(const AdapterBatchT& batch, float missing, int nthread); /*! * \brief Push a sparse page * \param batch the row page */ void Push(const SparsePage &batch); /*! * \brief Push a SparsePage stored in CSC format * \param batch The row batch to be pushed */ void PushCSC(const SparsePage& batch); }; class CSCPage: public SparsePage { public: CSCPage() : SparsePage() {} explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {} }; class SortedCSCPage : public SparsePage { public: SortedCSCPage() : SparsePage() {} explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {} }; class EllpackPageImpl; /*! * \brief A page stored in ELLPACK format. * * This class uses the PImpl idiom (https://en.cppreference.com/w/cpp/language/pimpl) to avoid * including CUDA-specific implementation details in the header. */ class EllpackPage { public: /*! * \brief Default constructor. * * This is used in the external memory case. An empty ELLPACK page is constructed with its content * set later by the reader. */ EllpackPage(); /*! * \brief Constructor from an existing DMatrix. * * This is used in the in-memory case. The ELLPACK page is constructed from an existing DMatrix * in CSR format. */ explicit EllpackPage(DMatrix* dmat, const BatchParam& param); /*! \brief Destructor. */ ~EllpackPage(); /*! \return Number of instances in the page. */ size_t Size() const; /*! \brief Set the base row id for this page. 
*/ void SetBaseRowId(size_t row_id); const EllpackPageImpl* Impl() const { return impl_.get(); } EllpackPageImpl* Impl() { return impl_.get(); } private: std::unique_ptr<EllpackPageImpl> impl_; }; template<typename T> class BatchIteratorImpl { public: virtual ~BatchIteratorImpl() = default; virtual T& operator*() = 0; virtual const T& operator*() const = 0; virtual void operator++() = 0; virtual bool AtEnd() const = 0; }; template<typename T> class BatchIterator { public: using iterator_category = std::forward_iterator_tag; // NOLINT explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); } void operator++() { CHECK(impl_ != nullptr); ++(*impl_); } T& operator*() { CHECK(impl_ != nullptr); return *(*impl_); } const T& operator*() const { CHECK(impl_ != nullptr); return *(*impl_); } bool operator!=(const BatchIterator& rhs) const { CHECK(impl_ != nullptr); return !impl_->AtEnd(); } bool AtEnd() const { CHECK(impl_ != nullptr); return impl_->AtEnd(); } private: std::shared_ptr<BatchIteratorImpl<T>> impl_; }; template<typename T> class BatchSet { public: explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(std::move(begin_iter)) {} BatchIterator<T> begin() { return begin_iter_; } // NOLINT BatchIterator<T> end() { return BatchIterator<T>(nullptr); } // NOLINT private: BatchIterator<T> begin_iter_; }; /*! * \brief This is data structure that user can pass to DMatrix::Create * to create a DMatrix for training, user can create this data structure * for customized Data Loading on single machine. * * On distributed setting, usually an customized dmlc::Parser is needed instead. */ template<typename T> class DataSource : public dmlc::DataIter<T> { public: /*! * \brief Meta information about the dataset * The subclass need to be able to load this correctly from data. */ MetaInfo info; }; /*! * \brief Internal data structured used by XGBoost during training. * There are two ways to create a customized DMatrix that reads in user defined-format. * * - Provide a dmlc::Parser and pass into the DMatrix::Create * - Alternatively, if data can be represented by an URL, define a new dmlc::Parser and register by * DMLC_REGISTER_DATA_PARSER; * - This works best for user defined data input source, such as data-base, filesystem. * - Provide a DataSource, that can be passed to DMatrix::Create * This can be used to re-use inmemory data structure into DMatrix. */ class DMatrix { public: /*! \brief default constructor */ DMatrix() = default; /*! \brief meta information of the dataset */ virtual MetaInfo& Info() = 0; /*! \brief meta information of the dataset */ virtual const MetaInfo& Info() const = 0; /** * \brief Gets batches. Use range based for loop over BatchSet to access individual batches. */ template<typename T> BatchSet<T> GetBatches(const BatchParam& param = {}); template <typename T> bool PageExists() const; // the following are column meta data, should be able to answer them fast. /*! \return Whether the data columns single column block. */ virtual bool SingleColBlock() const = 0; /*! \brief virtual destructor */ virtual ~DMatrix() = default; /*! \brief Whether the matrix is dense. */ bool IsDense() const { return Info().num_nonzero_ == Info().num_row_ * Info().num_col_; } /*! * \brief Load DMatrix from URI. * \param uri The URI of input. * \param silent Whether print information during loading. * \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode. * \param file_format The format type of the file, used for dmlc::Parser::Create. 
* By default "auto" will be able to load in both local binary file. * \param page_size Page size for external memory. * \return The created DMatrix. */ static DMatrix* Load(const std::string& uri, bool silent, bool load_row_split, const std::string& file_format = "auto", size_t page_size = kPageSize); /** * \brief Creates a new DMatrix from an external data adapter. * * \tparam AdapterT Type of the adapter. * \param [in,out] adapter View onto an external data. * \param missing Values to count as missing. * \param nthread Number of threads for construction. * \param cache_prefix (Optional) The cache prefix for external memory. * \param page_size (Optional) Size of the page. * * \return a Created DMatrix. */ template <typename AdapterT> static DMatrix* Create(AdapterT* adapter, float missing, int nthread, const std::string& cache_prefix = "", size_t page_size = kPageSize); /*! \brief page size 32 MB */ static const size_t kPageSize = 32UL << 20UL; protected: virtual BatchSet<SparsePage> GetRowBatches() = 0; virtual BatchSet<CSCPage> GetColumnBatches() = 0; virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0; virtual BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) = 0; virtual bool EllpackExists() const = 0; virtual bool SparsePageExists() const = 0; }; template<> inline BatchSet<SparsePage> DMatrix::GetBatches(const BatchParam&) { return GetRowBatches(); } template<> inline bool DMatrix::PageExists<EllpackPage>() const { return this->EllpackExists(); } template<> inline bool DMatrix::PageExists<SparsePage>() const { return this->SparsePageExists(); } template<> inline BatchSet<CSCPage> DMatrix::GetBatches(const BatchParam&) { return GetColumnBatches(); } template<> inline BatchSet<SortedCSCPage> DMatrix::GetBatches(const BatchParam&) { return GetSortedColumnBatches(); } template<> inline BatchSet<EllpackPage> DMatrix::GetBatches(const BatchParam& param) { return GetEllpackBatches(param); } } // namespace xgboost namespace dmlc { DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true); } #endif // XGBOOST_DATA_H_
ex1.c
#include <stdio.h> int main(void) { #pragma omp parallel { printf("Hello, world!\n"); } return 0; }
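The hello-world above starts a parallel region but gives no way to tell the threads apart. A minimal sketch of the same program extended with the standard OpenMP runtime queries omp_get_thread_num() and omp_get_num_threads(); the file name ex1_threads.c is made up for illustration.

/* ex1_threads.c -- hypothetical extension of ex1.c above */
#include <stdio.h>
#include <omp.h>

int main(void)
{
    #pragma omp parallel
    {
        /* each thread reads its own id; the team size is the same for all threads */
        int tid = omp_get_thread_num();
        int nthreads = omp_get_num_threads();
        printf("Hello, world from thread %d of %d!\n", tid, nthreads);
    }
    return 0;
}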
concattest1.c
#include <stdlib.h> #include "concattest1.h" void concattest1(float* l,int m,int n,float*output){ #pragma omp parallel for for (int H14 = 0; H14 < 1; H14++) { for (int H15 = 0; H15 < n; H15++) { output[((1 + (m - (1)))) * (H15) + H14] = l[(((m)) * (H15)) + H14]; } } #pragma omp parallel for for (int H16 = 1; H16 < m; H16++) { for (int H25 = 0; H25 < 1; H25++) { output[((1 + (m - (1)))) * (H25) + ((H16 - (1)) + 1)] = l[(((m)) * (H25)) + H16]; } for (int H26 = 1; H26 < n - (1); H26++) { output[((1 + (m - (1)))) * (((H26 - (1)) + 1)) + ((H16 - (1)) + 1)] = l[(((m)) * (H26)) + H16]; } for (int H27 = n - (1); H27 < n; H27++) { output[((1 + (m - (1)))) * (((H27 - (n - (1))) + (1 + (n - (1) - (1))))) + ((H16 - (1)) + 1)] = l[(((m)) * (H27)) + H16]; } } }
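The generated index arithmetic in concattest1() reduces to an element-wise copy of an m*n buffer, split into row/column ranges that each get their own loop. A hedged driver sketch for calling it: the header concattest1.h is not shown, so the prototype is re-declared here and assumed to match, the file name is hypothetical, and the program must be linked against concattest1.c.

/* driver_concattest1.c -- hypothetical driver for the function above */
#include <stdio.h>
#include <stdlib.h>

void concattest1(float *l, int m, int n, float *output);  /* assumed prototype */

int main(void)
{
    int m = 4, n = 3;                               /* small illustrative m x n case */
    float *l = malloc(sizeof(float) * m * n);
    float *output = malloc(sizeof(float) * m * n);

    for (int i = 0; i < m * n; i++)
        l[i] = (float)i;

    concattest1(l, m, n, output);                   /* output uses the same m*n layout */

    for (int i = 0; i < m * n; i++)
        printf("%.1f ", output[i]);
    printf("\n");

    free(l);
    free(output);
    return 0;
}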
openmp_wrapper.h
/*! * Copyright (c) 2017 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_OPENMP_WRAPPER_H_ #define LIGHTGBM_OPENMP_WRAPPER_H_ #ifdef _OPENMP #include <LightGBM/utils/log.h> #include <omp.h> #include <exception> #include <memory> #include <mutex> #include <stdexcept> #include <vector> inline int OMP_NUM_THREADS() { int ret = 1; #pragma omp parallel #pragma omp master { ret = omp_get_num_threads(); } return ret; } inline void OMP_SET_NUM_THREADS(int num_threads) { static const int default_omp_num_threads = OMP_NUM_THREADS(); if (num_threads > 0) { omp_set_num_threads(num_threads); } else { omp_set_num_threads(default_omp_num_threads); } } class ThreadExceptionHelper { public: ThreadExceptionHelper() { ex_ptr_ = nullptr; } ~ThreadExceptionHelper() { ReThrow(); } void ReThrow() { if (ex_ptr_ != nullptr) { std::rethrow_exception(ex_ptr_); } } void CaptureException() { // only catch first exception. if (ex_ptr_ != nullptr) { return; } std::unique_lock<std::mutex> guard(lock_); if (ex_ptr_ != nullptr) { return; } ex_ptr_ = std::current_exception(); } private: std::exception_ptr ex_ptr_; std::mutex lock_; }; #define OMP_INIT_EX() ThreadExceptionHelper omp_except_helper #define OMP_LOOP_EX_BEGIN() try { #define OMP_LOOP_EX_END() \ } \ catch (std::exception & ex) { \ Log::Warning(ex.what()); \ omp_except_helper.CaptureException(); \ } \ catch (...) { \ omp_except_helper.CaptureException(); \ } #define OMP_THROW_EX() omp_except_helper.ReThrow() #else /* * To be compatible with OpenMP, define a nothrow macro which is used by gcc * openmp, but not by clang. * See also https://github.com/dmlc/dmlc-core/blob/3106c1cbdcc9fc9ef3a2c1d2196a7a6f6616c13d/include/dmlc/omp.h#L14 */ #if defined(__clang__) #undef __GOMP_NOTHROW #define __GOMP_NOTHROW #elif defined(__cplusplus) #undef __GOMP_NOTHROW #define __GOMP_NOTHROW throw() #else #undef __GOMP_NOTHROW #define __GOMP_NOTHROW __attribute__((__nothrow__)) #endif #ifdef _MSC_VER #pragma warning(disable : 4068) // disable unknown pragma warning #endif #ifdef __cplusplus extern "C" { #endif /** Fall here if no OPENMP support, so just simulate a single thread running. All #pragma omp should be ignored by the compiler **/ inline void omp_set_num_threads(int) __GOMP_NOTHROW {} // NOLINT (no cast done here) inline void OMP_SET_NUM_THREADS(int) __GOMP_NOTHROW {} inline int omp_get_num_threads() __GOMP_NOTHROW {return 1;} inline int omp_get_max_threads() __GOMP_NOTHROW {return 1;} inline int omp_get_thread_num() __GOMP_NOTHROW {return 0;} inline int OMP_NUM_THREADS() __GOMP_NOTHROW { return 1; } #ifdef __cplusplus } // extern "C" #endif #define OMP_INIT_EX() #define OMP_LOOP_EX_BEGIN() #define OMP_LOOP_EX_END() #define OMP_THROW_EX() #endif #endif /* LIGHTGBM_OPENMP_WRAPPER_H_ */
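The OMP_NUM_THREADS() helper above relies on a common idiom: open a parallel region and let only the master thread record omp_get_num_threads(). The header itself is C++ (it defines the ThreadExceptionHelper class), so the sketch below re-states just that idiom as plain C, guarded for builds without OpenMP; the function name query_omp_num_threads is made up for illustration.

/* query_threads.c -- illustrative re-statement of the parallel/master idiom */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

static int query_omp_num_threads(void)
{
    int ret = 1;                 /* single-threaded fallback when OpenMP is absent */
#ifdef _OPENMP
    #pragma omp parallel
    #pragma omp master
    {
        /* only the master thread stores the team size */
        ret = omp_get_num_threads();
    }
#endif
    return ret;
}

int main(void)
{
    printf("default team size: %d\n", query_omp_num_threads());
    return 0;
}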
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 16; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; 
i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
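The benchmark above times only the serial triple loop between the #pragma scop markers. Below is a hedged sketch of how that 7-point update could be parallelized with OpenMP, written against a flat array instead of the pointer-of-pointers layout so it stays self-contained; the grid sizes, time steps, and the IDX macro are illustrative choices, not values taken from the benchmark.

/* 3d7pt_omp_sketch.c -- illustrative OpenMP version of the 7-point update */
#include <stdio.h>
#include <stdlib.h>

#define IDX(i, j, k) ((size_t)(i) * Ny * Nx + (size_t)(j) * Nx + (k))

int main(void)
{
    const int Nz = 66, Ny = 66, Nx = 66, Nt = 8;     /* illustrative sizes */
    const double alpha = 0.0876, beta = 0.0765;
    double *A[2];
    A[0] = calloc((size_t)Nz * Ny * Nx, sizeof(double));
    A[1] = calloc((size_t)Nz * Ny * Nx, sizeof(double));

    for (int t = 0; t < Nt - 1; t++) {
        const double *in = A[t % 2];
        double *out = A[(t + 1) % 2];
        /* every (i,j) pair is independent, so the two outer spatial loops can be collapsed */
        #pragma omp parallel for collapse(2)
        for (int i = 1; i < Nz - 1; i++)
            for (int j = 1; j < Ny - 1; j++)
                for (int k = 1; k < Nx - 1; k++)
                    out[IDX(i, j, k)] = alpha * in[IDX(i, j, k)]
                        + beta * (in[IDX(i - 1, j, k)] + in[IDX(i, j - 1, k)]
                                + in[IDX(i, j, k - 1)] + in[IDX(i + 1, j, k)]
                                + in[IDX(i, j + 1, k)] + in[IDX(i, j, k + 1)]);
    }

    printf("center value: %f\n", A[(Nt - 1) % 2][IDX(Nz / 2, Ny / 2, Nx / 2)]);
    free(A[0]);
    free(A[1]);
    return 0;
}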
MM1f.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "matrixUtils/matrixUtils.h"
#include "benchmarkUtils/timeUtils.h"

// Memory reservation
#define SIZE_DATA (1024*1024*64*3)
static double MEM_CHUNK[SIZE_DATA];

// Version 5: row-by-row version of the algorithm.
int main(int argc, char **argv){
    int N = (int) atoi(argv[1]);     // matrix size NxN
    int NUM_T = (int) atoi(argv[2]); // number of threads
    //#pragma omp parallel
    double *matrixA, *matrixB, *matrixC;
    matrixA = MEM_CHUNK;
    matrixB = matrixA + (N * N);
    matrixC = matrixB + (N * N);

    // The main process performs the init routines
    //#pragma omp master
    matrixInitN(N, matrixA, matrixB, matrixC);

    // printf("Matrix A: \n");
    // matrixPrint(N, N, matrixA);
    // printf("Matrix B: \n");
    // matrixPrint(N, N, matrixB);
    /*
    matrixT(N, N, matrixB, matrixC);
    printf("Matrix BT: \n");
    matrixPrint(N, N, matrixC);
    */

    omp_set_num_threads(NUM_T);
    sampleStart();

    // Test matrix multiplication with OpenMP
    #pragma omp parallel for
    for(int i=0; i<N; i++){
        for(int j=0; j<N; j++){
            double *ptra, *ptrb, s=0.0;
            ptra = matrixA + (i*N);
            ptrb = matrixB + (j*N);
            for(int k=0; k<N; k++, ptra++, ptrb++)
                s += (*ptra * *ptrb);
            matrixC[j+i*N] = s;
        }
    }

    sampleStop();
    // printf("Matrix C: \n");
    // matrixPrint(N, N, matrixC);
    printTime();
    return 0;
}
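Because the inner product walks matrixB + (j*N) row by row, the kernel above effectively computes (A * B^T)[i][j] unless B is stored pre-transposed, which is why the commented-out matrixT call exists. Below is a hedged, self-contained sketch of the same row-by-row pattern without the matrixUtils/timeUtils dependencies; the matrix size and the collapse(2) clause are illustrative choices, not taken from the original.

/* mm_rowwise_sketch.c -- illustrative standalone version of the row-by-row kernel */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main(void)
{
    const int N = 512;                                   /* illustrative size */
    double *A = malloc(sizeof(double) * N * N);
    double *B = malloc(sizeof(double) * N * N);
    double *C = malloc(sizeof(double) * N * N);

    for (int i = 0; i < N * N; i++) { A[i] = 1.0; B[i] = 2.0; }

    double start = omp_get_wtime();
    /* both outer loops are independent, so they can be collapsed into one parallel loop */
    #pragma omp parallel for collapse(2)
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            double s = 0.0;
            const double *pa = A + (size_t)i * N;        /* row i of A */
            const double *pb = B + (size_t)j * N;        /* row j of B */
            for (int k = 0; k < N; k++)
                s += pa[k] * pb[k];                      /* dot of two rows: (A * B^T)[i][j] */
            C[(size_t)i * N + j] = s;
        }
    }
    printf("C[0] = %f, elapsed = %f s\n", C[0], omp_get_wtime() - start);

    free(A); free(B); free(C);
    return 0;
}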
add_matrices.c
#include<stdio.h>
#include<omp.h>
#define EME 300
#define ENE 300
#define N EME*ENE

void set_matrix(int *);
void add_matrix(int *, int *);
void print_matrix(int *);

int main(){
    int A[EME][ENE], B[EME][ENE], *a, *b;
    double start = omp_get_wtime();
    set_matrix(&A[0][0]);
    set_matrix(&B[0][0]);
    printf("MATRIX A\n");
    //print_matrix(&A[0][0]);
    printf("MATRIX B\n");
    //print_matrix(&B[0][0]);
    a = &A[0][0];
    b = &B[0][0];
    add_matrix(a, b);
    printf("MATRIX C = A+B\n");
    //print_matrix(&A[0][0]);
    printf("Execution time: %f\n", omp_get_wtime()-start);
    return 0;
}

void set_matrix(int *M){
    int i;
    #pragma omp parallel for
    for (i=0; i<N; i++){
        M[i] = i+1;
    }
}

void add_matrix(int *a, int *b){
    int i;
    #pragma omp parallel for
    for (i=0; i<N; i++){
        a[i] += b[i];
    }
}

void print_matrix(int *M){
    int i;
    for (i=0; i<N; i++){
        printf("%d\t", *(M+i));
    }
    printf("\n");
}
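Once the element-wise add has run, the result left in A can also be checked in parallel. A small hedged sketch using an OpenMP reduction(+:...) clause to sum the result matrix: since set_matrix() writes M[i] = i+1 into both inputs, each result element is 2*(i+1) and the total is N*(N+1). The file name and layout are illustrative.

/* sum_check_sketch.c -- illustrative reduction-based check for the add above */
#include <stdio.h>
#include <omp.h>
#define N (300*300)

int main(void)
{
    static int C[N];
    for (int i = 0; i < N; i++)
        C[i] = 2 * (i + 1);                    /* what add_matrix() leaves in A */

    long long sum = 0;
    /* each thread accumulates a private partial sum that OpenMP combines at the end */
    #pragma omp parallel for reduction(+:sum)
    for (int i = 0; i < N; i++)
        sum += C[i];

    long long expected = (long long)N * (N + 1);
    printf("sum = %lld (expected %lld)\n", sum, expected);
    return 0;
}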
workspace.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * **/ #include "plasma_workspace.h" #include "plasma_internal.h" #include <omp.h> /******************************************************************************/ int plasma_workspace_create(plasma_workspace_t *work, size_t lwork, plasma_enum_t dtyp) { // Allocate array of pointers. #pragma omp parallel #pragma omp master { work->nthread = omp_get_num_threads(); } work->lwork = lwork; work->dtyp = dtyp; if ((work->spaces = (void**)calloc(work->nthread, sizeof(void*))) == NULL) { free(work->spaces); plasma_error("malloc() failed"); return PlasmaErrorOutOfMemory; } // Each thread allocates its workspace. size_t size = (size_t)lwork * plasma_element_size(work->dtyp); int info = PlasmaSuccess; #pragma omp parallel { int tid = omp_get_thread_num(); if ((work->spaces[tid] = (void*)malloc(size)) == NULL) { info = PlasmaErrorOutOfMemory; } } if (info != PlasmaSuccess) { plasma_workspace_destroy(work); } return info; } /******************************************************************************/ int plasma_workspace_destroy(plasma_workspace_t *work) { if (work->spaces != NULL) { for (int i = 0; i < work->nthread; ++i) { free(work->spaces[i]); work->spaces[i] = NULL; } free(work->spaces); work->spaces = NULL; work->nthread = 0; work->lwork = 0; } return PlasmaSuccess; }
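A hedged sketch of how the two functions above would be called from application code: create one per-thread buffer of lwork elements, let the worker threads use it, then release it. The include name follows what this file itself includes; the PlasmaRealDouble type constant and the helper name are assumptions about the rest of the PLASMA headers, not something shown here.

/* workspace_usage_sketch.c -- illustrative caller for plasma_workspace_{create,destroy} */
#include <stdio.h>
#include "plasma_workspace.h"        /* assumed to declare the two functions above */

int use_workspace(size_t lwork)
{
    plasma_workspace_t work;

    /* one buffer of lwork double-precision elements per OpenMP thread */
    int info = plasma_workspace_create(&work, lwork, PlasmaRealDouble);
    if (info != PlasmaSuccess) {
        fprintf(stderr, "workspace allocation failed (%d)\n", info);
        return info;
    }

    /* ... per-thread kernels would index work.spaces[omp_get_thread_num()] ... */

    return plasma_workspace_destroy(&work);
}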
scatter_avx2.c
// scatter values through an index mask, once serially and once with omp simd, then compare
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define N 32000
#define SCALE 8

int main() {
    srand(time(NULL));
    float *numbers = malloc(sizeof(float)*N);
    float *result1 = malloc(sizeof(float)*N);
    float *result2 = malloc(sizeof(float)*N);
    int *mask = malloc(sizeof(int)*N);

    // Init the numbers
    for (int i = 0; i<N; i++)
        numbers[i] = rand() % 10;
    for (int i = 0; i<N; i++) {
        result1[i] = 0;
        result2[i] = 0;
    }
    for (int i = 0; i<N; i++)
        mask[i] = rand() % N;

    for (int i = 0; i<SCALE; i++)
        printf("%.1f ", numbers[i]);
    puts("\n---");
    for (int i = 0; i<SCALE; i++)
        printf("%d ", mask[i]);
    puts("\n---");
    puts("---------------------------------------------");

    // Serial scatter
    for (int i = 0; i<SCALE; i++) {
        result1[mask[i]] = numbers[mask[i]];
    }

    // Vectorized scatter
    #pragma omp simd simdlen(SCALE)
    for (int i = 0; i<SCALE; i++) {
        result2[mask[i]] = numbers[mask[i]];
    }

    // print
    for (int i = 0; i<SCALE; i++)
        printf("%.1f ", result1[i]);
    puts("\n---");
    for (int i = 0; i<SCALE; i++)
        printf("%.1f ", result2[i]);
    puts("\n---");

    int errors = 0;
    for (int i = 0; i<SCALE; i++) {
        if (result1[i] != result2[i]) ++errors;
    }
    printf("Errors: %d\n", errors);
    //printf("Result1: %f | Result2: %f\n", result1, result2);

    free(numbers);
    free(result1);
    free(result2);
    free(mask);
    return 0;
}
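Scatter stores like the one above are hard for compilers to vectorize because the mask indices may collide. The complementary pattern, a masked accumulation, maps cleanly onto #pragma omp simd reduction(+:...). A minimal hedged sketch of that pattern follows; the sizes, values, and file name are illustrative.

/* simd_reduction_sketch.c -- illustrative masked accumulation with omp simd */
#include <stdio.h>

#define N 32000

int main(void)
{
    static float numbers[N];
    for (int i = 0; i < N; i++)
        numbers[i] = (float)(i % 10);

    float sum = 0.0f;
    /* the reduction clause gives each SIMD lane a private partial sum */
    #pragma omp simd reduction(+:sum)
    for (int i = 0; i < N; i++) {
        if (i % 2 == 0)            /* only accumulate the even-indexed entries */
            sum += numbers[i];
    }

    printf("sum of even-indexed entries: %f\n", sum);
    return 0;
}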
GB_binop__plus_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__plus_int16) // A.*B function (eWiseMult): GB (_AemultB_01__plus_int16) // A.*B function (eWiseMult): GB (_AemultB_02__plus_int16) // A.*B function (eWiseMult): GB (_AemultB_03__plus_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_int16) // A*D function (colscale): GB (_AxD__plus_int16) // D*A function (rowscale): GB (_DxB__plus_int16) // C+=B function (dense accum): GB (_Cdense_accumB__plus_int16) // C+=b function (dense accum): GB (_Cdense_accumb__plus_int16) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_int16) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_int16) // C=scalar+B GB (_bind1st__plus_int16) // C=scalar+B' GB (_bind1st_tran__plus_int16) // C=A+scalar GB (_bind2nd__plus_int16) // C=A'+scalar GB (_bind2nd_tran__plus_int16) // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij + bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x + y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PLUS || GxB_NO_INT16 || GxB_NO_PLUS_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__plus_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__plus_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__plus_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__plus_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__plus_int16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__plus_int16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__plus_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; 
return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__plus_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__plus_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__plus_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__plus_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__plus_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = (x + bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__plus_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij + y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x + aij) ; \ } GrB_Info GB (_bind1st_tran__plus_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads 
) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij + y) ; \ } GrB_Info GB (_bind2nd_tran__plus_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
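The _bind1st and _bind2nd kernels above share one loop shape: walk the value array, skip entries whose bitmap bit is clear, apply the binary operator against a bound scalar, and split the iterations across threads with a static schedule. Below is a hedged standalone sketch of that shape for the int16 PLUS case, with the GBB/GBX macros replaced by explicit bitmap and array accesses so it does not depend on the GraphBLAS internals; it illustrates the loop pattern, not the library API.

/* bind1st_plus_sketch.c -- illustrative standalone version of the bind1st loop shape */
#include <stdio.h>
#include <stdint.h>

/* Cx [p] = x + Bx [p] for every entry whose bitmap bit is set */
static void bind1st_plus_int16(int16_t *Cx, int16_t x, const int16_t *Bx,
                               const int8_t *Bb, int64_t bnz, int nthreads)
{
    int64_t p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < bnz; p++) {
        if (Bb != NULL && !Bb[p])      /* entry not present in the bitmap */
            continue;
        Cx[p] = (int16_t)(x + Bx[p]);
    }
}

int main(void)
{
    int16_t Bx[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    int8_t  Bb[8] = {1, 1, 0, 1, 1, 0, 1, 1};
    int16_t Cx[8] = {0};

    bind1st_plus_int16(Cx, 100, Bx, Bb, 8, 2);

    for (int i = 0; i < 8; i++)
        printf("%d ", (int)Cx[i]);
    printf("\n");
    return 0;
}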
testcase_1.c
int *var=6; struct mystruct *ms=&var; int auto, static, inline; extern void *k; int p; int p; int p; int *hh(char *p); int main(int b) { int auto=2,b=3,c; c=auto+b; printf("%d",c); struct player { int a; double c; }; int *jj=&auto; System.out.print("java here"); char echo[3]="bash here"; myprintf("CS3300 here"); printf(echo); /* mixing things here a bit */ if(a==9) { //NO-OP } if(a==1) hh(++jj); else if(a==2) hh(jj++); else if(a==3) hh(*jj++); else { //NO-OP } struct player *p; p=(struct player *)malloc(sizeof(struct player)); p->a=1; p->b=2.4; //#pragma omp parallel for for(a=1;a<=5;a++) static(a); int *p; float *j; p=&auto; j=0x1234; printf("*j=%p",(char *)j); void *static=&c; int a=5,b=8,c; c=a<=>b; hh(p); return(0); } int *hh(char *p) { int n=7; scanf("%d",&n); if(n==0) printf("%d\n",n+1); else if(n==1) printf("%d\n",n+2); return(NULL); }
block-5.c
// { dg-do compile } void foo() { #pragma omp master { goto bad1; // { dg-error "invalid exit" } } #pragma omp master { bad1: return; // { dg-error "invalid exit" } } }
prefix-sum-recursive.c
/* @file prefix-sum-recursive.c
 * @author Robin Vermes
 * @author Valentin Laurent
 * @details theoretical log^2(n) prefix-sum
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <omp.h>

void display(int * t, int length_t) {
    int i = 0;
    for (; i < length_t; ++i) {
        printf("%d ", t[i]);
    }
    printf("\n");
}

void prefix_sum(int* T, int n) {
    if (n > 1) {
        int * S = (int*) malloc ((n/2+1)*sizeof(int));
        int i = 0;
        S[n/2] = 0;
        #pragma omp parallel for
        for (i = 0; i < n/2; ++i) {
            S[i] = T[2*i] + T[2*i + 1];
        }
        #pragma omp task
        {
            prefix_sum(S, n/2);
        }
        #pragma omp taskwait
        #pragma omp parallel for
        for (i = 0; i < n/2; ++i) {
            T[i*2 + 1] = S[i];
            T[i*2 + 2] = S[i] + T[i*2 + 2];
        }
        free(S);
    }
}

int main(int argc, char const *argv[]) {
    /*ARGUMENT CHECKING*/
    if (argc < 2) {
        printf("USAGE : %s <N>\n", argv[0]);
        exit(1);
    }

    /*INITIALIZATION*/
    /*sizes of table*/
    int n = atoi(argv[1]);
    /*tables creation*/
    int * tab = (int *) malloc ((n+1)*sizeof(int));
    /*indices*/
    int i = 0;
    double start;
    double end;
    /*initialize rng*/
    srand (time(NULL));
    /*initialization of tables*/
    for (; i < n; ++i) {
        tab[i] = (int)rand()%10;
    }
    tab[n] = 0;

    start = omp_get_wtime();
    #pragma omp parallel
    {
        #pragma omp single
        {
            prefix_sum(tab, n);
        }
    }
    end = omp_get_wtime();

    printf("start SP1 with n = %d\n", n);
    printf("user time used for SP1 : %f\n", end - start);
    printf("-----------------------------------------------------------------\n");

    /*FREE RESOURCES*/
    free(tab);
    return 0;
}
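The recursive scheme above rewrites tab in place into an inclusive prefix sum. Below is a hedged, self-contained sketch of a different way to get the same result with OpenMP, the common two-pass blocked scan (each thread scans a contiguous block, a short serial step combines the block totals, then each block is shifted), checked against a serial reference. This is a separate technique, not the algorithm above; names and sizes are illustrative.

/* blocked_scan_sketch.c -- two-pass blocked inclusive scan with a serial check */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main(void)
{
    int n = 1 << 20;
    int *data = malloc(n * sizeof(int));
    int *ref  = malloc(n * sizeof(int));
    int *block_sum = NULL;

    for (int i = 0; i < n; i++)
        data[i] = i % 10;

    /* serial inclusive prefix sum as the reference */
    ref[0] = data[0];
    for (int i = 1; i < n; i++)
        ref[i] = ref[i - 1] + data[i];

    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        int nth = omp_get_num_threads();

        #pragma omp single
        block_sum = calloc(nth + 1, sizeof(int));   /* implicit barrier after single */

        int lo = (int)((long long)n * tid / nth);
        int hi = (int)((long long)n * (tid + 1) / nth);

        /* pass 1: each thread scans its own contiguous block */
        int running = 0;
        for (int i = lo; i < hi; i++) {
            running += data[i];
            data[i] = running;
        }
        block_sum[tid + 1] = running;

        #pragma omp barrier
        #pragma omp single
        {
            /* serial scan of the per-block totals */
            for (int t = 1; t <= nth; t++)
                block_sum[t] += block_sum[t - 1];
        }                                           /* implicit barrier after single */

        /* pass 2: shift each block by the sum of all preceding blocks */
        for (int i = lo; i < hi; i++)
            data[i] += block_sum[tid];
    }

    int errors = 0;
    for (int i = 0; i < n; i++)
        if (data[i] != ref[i])
            ++errors;
    printf("mismatches: %d\n", errors);

    free(data);
    free(ref);
    free(block_sum);
    return 0;
}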
GB_unaryop__lnot_uint8_bool.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_uint8_bool // op(A') function: GB_tran__lnot_uint8_bool // C type: uint8_t // A type: bool // cast: uint8_t cij = (uint8_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ bool #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ uint8_t z = (uint8_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_UINT8 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_uint8_bool ( uint8_t *restrict Cx, const bool *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_uint8_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
loop-7.c
void foo (void) { #pragma omp for collapse(2) /* { dg-error "invalid OpenMP non-rectangular loop step" } */ for (int i = 0; i < 6; i++) for (int j = 4 * i; j < 7 * i; j += 2) ; #pragma omp for collapse(2) /* { dg-error "invalid OpenMP non-rectangular loop step" } */ for (int i = 0; i < 32; i += 7) for (int j = 3 * i; j < 7 * i; j += 30) ; #pragma omp for collapse(2) for (int i = 0; i < 6; i++) for (int j = 4 * i; j < 6 * i; j += 2) ; #pragma omp for collapse(2) for (int i = 0; i < 6; i += 2) for (int j = 4 * i; j < 7 * i; j += 2) ; #pragma omp for collapse(2) for (int i = 0; i < 6; i += 5) for (int j = 4 * i; j < 7 * i; j += 15) ; }
pompi.h
/* The MIT License (MIT) Copyright (c) 2016 Lukáš Slouka Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /** * @file pompi.h * @brief This file contains all necessary tools for successful monitoring * of OpenMP multithreaded applications using Performance Application * Programmable Interface (PAPI) in C++ code. For all intents and purposes * it acts like a wrapper. * @author Lukas Slouka <lukslouka@gmail.com> * @version 1.0 */ #pragma once #include <papi.h> #include <vector> #include <string> #include <stdio.h> #include <cstring> #include <algorithm> #ifdef _OPENMP #include <omp.h> #endif /// pompi namespace. namespace pompi { /// Used in methods that support output on either only one thread or all threads. #define ALL_THREADS -1 // Arbitrary boundary for max events #define MAX_EVENTS 20 int g_eventset = PAPI_NULL; #ifdef _OPENMP #pragma omp threadprivate(g_eventset) #endif long long g_start_counter_values[MAX_EVENTS]; #ifdef _OPENMP #pragma omp threadprivate(g_start_counter_values) #endif long long g_end_counter_values[MAX_EVENTS]; #ifdef _OPENMP #pragma omp threadprivate(g_start_counter_values) #endif // Add new output formats here /** * Output formats enum. * Contains all available kinds of output format. */ enum OutputFormat { GNUPLOT, /**< Formats performance data to be usable with GNUplot. */ }; // Add new derived stats here /** * Perived statistics available from PAPI counters. * Contains all available statistics derived from standard PAPI events. 
*/ enum PapiDerivedStat { D_L1_TMR, /**< Derived L1 cache total missrate [%]*/ D_L2_TMR, /**< Derived L2 cache total missrate [%]*/ D_L3_TMR, /**< Derived L3 cache total missrate [%]*/ D_L1_DMR, /**< Derived L1 cache data missrate [%]*/ D_L2_DMR, /**< Derived L2 cache data missrate [%]*/ D_L3_DMR, /**< Derived L3 cache data missrate [%]*/ D_GIPC, /**< Derived graduated instructions per second*/ D_IIPC, /**< Derived issued instructions per second*/ D_BR_MPR, /**< Derived branch missprediction rate [%]*/ D_MFLOPS, /**< Derived MFLOPS*/ D_GFLOPS, /**< Derived GFLOPS*/ D_MIPS, /**< Derived MIPS*/ D_GIPS, /**< Derived GIPS*/ D_TLBM_PC, /**< Derived TLB data misses per count */ D_TLBM_PS, /**< Derived TLB data misses per second */ }; /** * Pompi timer class * Timer class that can be used by itself, but is part of wrapper */ class PompiTimer { private: /** * First call to timer */ double first_call_time_; long long first_call_time_usec_; /** * Last call to timer */ double last_call_time_; long long last_call_time_usec_; /** * Inner timer call (does not change first call) */ double inner_call_time_; long long inner_call_time_usec_; /** * Inner calls aggregated duration */ double inner_calls_sum_; long long inner_calls_sum_usec_; /** * Inner calls count */ unsigned long int repetition_count_; public: /** * A constructor * Initializes all counters to zero */ PompiTimer() { first_call_time_ = 0.0f; first_call_time_usec_ = 0; last_call_time_ = 0.0f; last_call_time_usec_ = 0; inner_call_time_ = 0.0f; inner_call_time_usec_ = 0; inner_calls_sum_ = 0.0f; inner_calls_sum_usec_ = 0; repetition_count_ = 0; } /** * Begins timing * Sets first call if it is zero, otherwise sets inner call time */ void BeginTiming() { #ifdef _OPENMP inner_call_time_ = omp_get_wtime(); if(first_call_time_ == 0.0f) first_call_time_ = inner_call_time_; #endif inner_call_time_usec_ = PAPI_get_real_usec(); if(first_call_time_usec_ == 0) first_call_time_usec_ = inner_call_time_usec_; } /** * Ends timing */ void EndTiming() { #ifdef _OPENMP last_call_time_ = omp_get_wtime(); inner_calls_sum_ += last_call_time_ - inner_call_time_; #endif last_call_time_usec_ = PAPI_get_real_usec(); inner_calls_sum_usec_ += last_call_time_usec_ - inner_call_time_usec_; repetition_count_++; } /** * Clears entire timer */ void ResetTimer() { first_call_time_ = 0.0f; first_call_time_usec_ = 0; last_call_time_ = 0.0f; last_call_time_usec_ = 0; inner_call_time_ = 0.0f; inner_call_time_usec_ = 0; inner_calls_sum_ = 0.0f; inner_calls_sum_usec_ = 0; repetition_count_ = 0; } /** * @return First timer call since latest reset */ double GetFirstCallTime() { #ifndef _OPENMP return first_call_time_usec_ / 1e6; #endif return first_call_time_; } long long GetFirstCallTimeUsec() { return first_call_time_usec_; } /** * @return Last timer call since latest reset */ double GetLastCallTime() { #ifndef _OPENMP return last_call_time_usec_ / 1e6; #endif return last_call_time_; } long long GetLastCallTimeUsec() { return last_call_time_usec_; } /** * @return Number of calls since lates reset */ double GetRepetitionCount() { return repetition_count_; } void SetRepetitionCount(unsigned long int count) { repetition_count_ = count; } /** * @return Sum of all calls since latest reset */ double GetAggregatedTime() { #ifndef _OPENMP return inner_calls_sum_usec_ / 1e6; #endif return inner_calls_sum_; } long long GetAggregatedTimeUsec() { return inner_calls_sum_usec_; } /** * @return Duration between first and last call since latest reset */ double GetTotalTime() { #ifndef _OPENMP return 
GetTotalTimeUsec() / 1e6; #endif return last_call_time_ - first_call_time_; } long long GetTotalTimeUsec() { return last_call_time_usec_ - first_call_time_usec_; } /** * @return Average time over aggregated time */ double GetAverageTimeOverAggregated() { #ifndef _OPENMP return GetAverageTimeOverAggregatedUsec() / 1e6; #endif return inner_calls_sum_/repetition_count_; } double GetAverageTimeOverAggregatedUsec() { return inner_calls_sum_usec_/repetition_count_; } /** * @return Average time over total time */ double GetAverageTimeOverTotal() { #ifndef _OPENMP return GetAverageTimeOverTotalUsec() / 1e6; #endif return GetTotalTime()/repetition_count_; } double GetAverageTimeOverTotalUsec() { return GetTotalTimeUsec()/repetition_count_; } }; /** * Pompi Base class. * This class is heart and soul of pompi wrapper. Contains all methods * and attributes needed for successful monitoring of performance. */ class Base { private: /** * Pompi timer * Used for all time measurements * @see Start() * @see End() */ PompiTimer timer_; /** * Vector of papi event codes. * Dependant on PAPI_EVENTS enviroment variable. */ std::vector< int > papi_events_; /** * Vector of papi event names. * Content corresponds with papi_events_. */ std::vector< std::string > papi_event_names_; /** * Vector of thread hardware counter values. * Filled by each thread after call to Stop. * @see ClearCounters() * @see GetCounters() * @see Stop() */ std::vector< std::vector< long long > > thread_data_; /** * Maximum number of threads. * Initialized by call to omp_get_max_threads(). */ int max_threads_; /** * Maximum number of events according to number of available hw counters */ int hw_counters_; public: /** * Base constructor * Initializes PAPI library and PAPI library threading * support for OpenMP. Parses PAPI_EVENTS enviroment variable * and modifies papi_events_ and papi_event_names_ accordingly. */ Base(); /** * Base destructor */ ~Base() { PAPI_shutdown(); } public: /** * Tells pompi to monitor additional event. * @param event C string name of PAPI event */ void AddEvent(char * event); /** * Starts counting of all monitored events on all threads. * This method is complemented by Stop method. * @warning Must be called inside of a OpenMP parallel region * @see Stop() */ void Start(); /** * Stops counting of all monitored events on all threads * This method is complemented by Star method. * @warning Must be called inside of a OpenMP parallel region * @see Start() */ void Stop(); /** * Sets repetition count * @param count new count */ void SetRepetitionCount(unsigned long int count); /** * Provides accumulated execution time betwwn all pairs of Start() * and Stop() methods. This behaviour is reset by ClearTimers(). * @return execution time * @see Start() * @see Stop() * @see ClearTimers() */ double GetExecutionTime(); /** * Similar to GetExecutionTime. Used to calculate execution time * averaged on number of trials. * @return average execution time. * @see GetExecutionTime() */ double GetAverageExecutionTime(); /** * Sets all counters to zero. Number of counter depends on * maximum possible threads and count of monitored papi events. * Can be used to clear counters of any singular thread or all of them. * @param thread_id Integer ID of a thread. If thread with thread_id * does not exist, all counters are cleared. */ void ClearCounters(int thread_id = ALL_THREADS); /** * Sets both execution time timers to zero. */ void ClearTimers(); /** * Prints results on standard output. 
Can be used to output counters * of any singular thread or all threads aggregated (implicit). * @param value Any value selected by user (eg. total number of threads). * @param thread_id ID of a thread to be outputted (invalid thread ID * results in aggregated output on all threads). */ template < typename T > void PrintResults(T value, int thread_id = ALL_THREADS); /** * Simillar to PrintResults. Takes additional file description * parameters to define output file. * @param value Any value selected by user (eg. total number of threads). * @param file_name C string name of a output file. * @param format Output format. * @param thread_id ID of a thread to be outputted (invalid thread ID * results in aggregated output on all threads). */ template < typename T > void PrintResultsToFile(T value, const char * file_name, OutputFormat format, int thread_id = ALL_THREADS); private: /** * Private method used to obtain counters * @param counters array of values where counters are to be saved. * @param thread_id Counters of a thread with this ID will be extracted. */ void GetCounters(long long * counters, int thread_id = ALL_THREADS); /** * Private method used to get index of an event within papi_events_. * @param event_code Integer event code. * @return index. */ int GetEventIndex(int event_code); /** * Private predicate checking availablity of PAPI event within * monitored events. * @param event_code Integer event code. * @return Boolean result of predicate. */ bool EventAvailable(int event_code); /** * Private method obtaining all derived event that are available. * @param stats Vector filled with derived stats. */ void GetDerivedStats(std::vector< PapiDerivedStat > & stats); /** * Private method translating derived stat code to string. * @param stat Derived stat code. * @return String representation of derived stat. */ std::string GetDerivedStatName(PapiDerivedStat stat); /** * Private generic method that computes derived stat value. * @param stat Derived stat to be computed * @param thread_id Derived stat will be computed for a thread with * this thread_id. In case thread_id is invalid, * the stat will be computed aggregated for all threads. * @return Derived stat value */ double ComputeDerivedStat(PapiDerivedStat stat, int thread_id = ALL_THREADS); /** * Private generic method defining print format for GNUPLOT * @param value Any value selected by user (eg. total number of threads). * @param output Output stream * @param thread_id Output will be performed for thread with this ID. * In case thread_id is invalid, output will be * performed aggregated for all threads. 
*/ template < typename T > void PrintGnuplot(T value, FILE * output, int thread_id = ALL_THREADS); }; ///////////////////////////////////// // Implementation PUBLIC METHODS // ///////////////////////////////////// Base::Base() { #ifdef _OPENMP max_threads_ = 24;//omp_get_max_threads(); printf("[Log] OpenMP has been found, Maximum number of threads is %d\n", max_threads_); #else max_threads_ = 1; printf("[Log] OpenMP not found, POMPI will be operating in single thread mode\n"); #endif thread_data_.resize(max_threads_); timer_.ResetTimer(); int PAPI_return_value = PAPI_library_init(PAPI_VER_CURRENT); if(PAPI_return_value != PAPI_VER_CURRENT) { fprintf(stderr, "[Error] Could not initialize PAPI library\n"); exit(1); } printf("[Log] PAPI library successfully initialized\n"); hw_counters_ = PAPI_num_counters(); if(hw_counters_ > MAX_EVENTS) hw_counters_ = MAX_EVENTS; printf("[Log] Maximum number of events is %d\n", hw_counters_); #ifdef _OPENMP PAPI_return_value = PAPI_thread_init((unsigned long (*)(void)) (omp_get_thread_num)); if(PAPI_return_value != PAPI_OK) { fprintf(stderr, "[Error] Coult not initialize OpenMP threading for PAPI\n"); exit(1); } printf("[Log] OpenMP threading for PAPI successfully initialized\n"); #endif // Resolving PAPI events from PAPI_EVENTS enviroment variable char * papi_events = getenv("PAPI_EVENTS"); char * event = strtok(papi_events, "|"); int event_id; while(event != NULL) { AddEvent(event); event = strtok(NULL, "|"); } } void Base::AddEvent(char * event) { if(papi_events_.size() > hw_counters_) { fprintf(stderr, "[Warning] Cannot add any more events, skipping %s\n", event); return; } int event_id; int PAPI_return_value = PAPI_event_name_to_code(event, &event_id); if(PAPI_return_value != PAPI_OK) fprintf(stderr, "[Warning] Papi event `%s` does not exists, skipping\n", event); else { if(std::find(papi_events_.begin(), papi_events_.end(), event_id) == papi_events_.end()) { papi_event_names_.push_back(std::string(event)); papi_events_.push_back(event_id); for(int i = 0; i < max_threads_; ++i) thread_data_[i].push_back(0); printf("[Log] Adding papi event `%s`\n", event); } else fprintf(stderr, "[Warning] Papi event `%s` already listed, skipping\n", event); } } void Base::Start() { timer_.BeginTiming(); #ifdef _OPENMP #pragma omp parallel { PAPI_register_thread(); int thread_id = PAPI_thread_id(); int PAPI_return_value = PAPI_create_eventset(&g_eventset); if(PAPI_return_value != PAPI_OK) { #pragma omp critical fprintf(stderr, "[Error] Failed to initialize PAPI event set for thread #%d\n", thread_id); } PAPI_return_value = PAPI_add_events(g_eventset, &papi_events_[0], papi_events_.size()); if(PAPI_return_value != PAPI_OK) { #pragma omp critical fprintf(stderr, "[Error] Failed to add events to event set for thread #%d\n", thread_id); } PAPI_return_value = PAPI_start(g_eventset); if(PAPI_return_value != PAPI_OK) { #pragma omp critical fprintf(stderr, "[Error] Failed to start counting in thread #%d\n", thread_id); } PAPI_return_value = PAPI_read(g_eventset, g_start_counter_values); if(PAPI_return_value != PAPI_OK) { #pragma omp critical fprintf(stderr, "[Error] Failed to read counters in thread #%d\n", thread_id); } } #else int PAPI_return_value = PAPI_create_eventset(&g_eventset); if(PAPI_return_value != PAPI_OK) fprintf(stderr, "[Error] Failed to initialize PAPI event set\n"); PAPI_return_value = PAPI_add_events(g_eventset, &papi_events_[0], papi_events_.size()); if(PAPI_return_value != PAPI_OK) fprintf(stderr, "[Error] Failed to add events to event set\n"); 
PAPI_return_value = PAPI_start(g_eventset); if(PAPI_return_value != PAPI_OK) fprintf(stderr, "[Error] Failed to start counting\n"); PAPI_return_value = PAPI_read(g_eventset, g_start_counter_values); if(PAPI_return_value != PAPI_OK) fprintf(stderr, "[Error] Failed to read counters\n"); #endif } void Base::Stop() { #ifdef _OPENMP #pragma omp parallel { int thread_id = PAPI_thread_id(); int PAPI_return_value = PAPI_read(g_eventset, g_end_counter_values); if(PAPI_return_value != PAPI_OK) { #pragma omp critical fprintf(stderr, "[Error] Failed to read counters in thread #%d\n", thread_id); } for(int event = 0; event < papi_events_.size(); ++event) thread_data_[thread_id][event] += g_end_counter_values[event]; PAPI_return_value = PAPI_stop(g_eventset, NULL); if(PAPI_return_value != PAPI_OK) { #pragma omp critical fprintf(stderr, "[Error] Failed to stop counting in thread #%d\n", thread_id); } PAPI_return_value = PAPI_cleanup_eventset(g_eventset); if(PAPI_return_value != PAPI_OK) { #pragma omp critical fprintf(stderr, "[Error] Failed to clean up event set in thread #%d\n", thread_id); } PAPI_return_value = PAPI_destroy_eventset(&g_eventset); if(PAPI_return_value != PAPI_OK) { #pragma omp critical fprintf(stderr, "[Error] Failed to destroy event set in thread #%d\n", thread_id); } PAPI_unregister_thread(); } #else int PAPI_return_value = PAPI_read(g_eventset, g_end_counter_values); if(PAPI_return_value != PAPI_OK) fprintf(stderr, "[Error] Failed to read counters\n"); for(int event = 0; event < papi_events_.size(); ++event) thread_data_[0][event] += g_end_counter_values[event]; PAPI_return_value = PAPI_stop(g_eventset, NULL); if(PAPI_return_value != PAPI_OK) fprintf(stderr, "[Error] Failed to stop counting\n"); PAPI_return_value = PAPI_cleanup_eventset(g_eventset); if(PAPI_return_value != PAPI_OK) fprintf(stderr, "[Error] Failed to clean up event set\n"); PAPI_return_value = PAPI_destroy_eventset(&g_eventset); if(PAPI_return_value != PAPI_OK) fprintf(stderr, "[Error] Failed to destroy event set\n"); #endif timer_.EndTiming(); } void Base::SetRepetitionCount(unsigned long int count) { timer_.SetRepetitionCount(count); } double Base::GetExecutionTime() { return timer_.GetAggregatedTime(); } double Base::GetAverageExecutionTime() { return timer_.GetAverageTimeOverAggregated(); } void Base::ClearCounters(int thread_id) { if((thread_id < 0)||(thread_id > max_threads_)) { for(int thread = 0; thread < max_threads_; ++thread) for(int event = 0; event < papi_events_.size(); ++event) thread_data_[thread][event] = 0; } else { for(int event = 0; event < papi_events_.size(); ++event) thread_data_[thread_id][event] = 0; } } void Base::ClearTimers() { timer_.ResetTimer(); } ////////////////////////////////////// // Implementation PRIVATE methods // ////////////////////////////////////// void Base::GetCounters(long long * counters, int thread_id) { for(int event = 0; event < papi_events_.size(); ++event) counters[event] = 0; if(thread_id == ALL_THREADS) { for(int thread = 0; thread < max_threads_; ++thread) for(int event = 0; event < papi_events_.size(); ++event) counters[event] += thread_data_[thread][event]; } else { for(int event = 0; event < papi_events_.size(); ++event) counters[event] = thread_data_[thread_id][event]; } } bool Base::EventAvailable(int event_code) { std::vector<int>::iterator event; event = std::find(papi_events_.begin(), papi_events_.end(), event_code); return (event == papi_events_.end()) ? 
false : true; } int Base::GetEventIndex(int event_code) { for(int index = 0; index < papi_events_.size(); ++index) if(papi_events_[index] == event_code) return index; return 0; } // Add new derived stats here void Base::GetDerivedStats(std::vector< PapiDerivedStat > &stats) { if (EventAvailable(PAPI_LD_INS) && EventAvailable(PAPI_SR_INS) && EventAvailable(PAPI_L1_TCM)) stats.push_back(D_L1_TMR); if (EventAvailable(PAPI_L2_TCA) && EventAvailable(PAPI_L2_TCM)) stats.push_back(D_L2_TMR); if (EventAvailable(PAPI_L3_TCA) && EventAvailable(PAPI_L3_TCM)) stats.push_back(D_L3_TMR); if (EventAvailable(PAPI_L1_DCA) && EventAvailable(PAPI_L1_DCM)) stats.push_back(D_L1_DMR); if (EventAvailable(PAPI_L2_DCA) && EventAvailable(PAPI_L2_DCM)) stats.push_back(D_L2_DMR); if (EventAvailable(PAPI_L3_DCA) && EventAvailable(PAPI_L3_DCM)) stats.push_back(D_L3_DMR); if (( EventAvailable(PAPI_BR_MSP) && EventAvailable(PAPI_BR_CN) )|| ( EventAvailable(PAPI_BR_MSP) && EventAvailable(PAPI_BR_PRC) )) stats.push_back(D_BR_MPR); if (EventAvailable(PAPI_TOT_INS) && EventAvailable(PAPI_TOT_CYC)) stats.push_back(D_GIPC); if (EventAvailable(PAPI_TOT_IIS) && EventAvailable(PAPI_TOT_CYC)) stats.push_back(D_IIPC); if (EventAvailable(PAPI_FP_INS)) { stats.push_back(D_MFLOPS); stats.push_back(D_GFLOPS); } if (EventAvailable(PAPI_TOT_INS)) { stats.push_back(D_MIPS); stats.push_back(D_GIPS); } if (EventAvailable(PAPI_TLB_DM)) { stats.push_back(D_TLBM_PC); stats.push_back(D_TLBM_PS); } } double Base::ComputeDerivedStat(PapiDerivedStat stat, int thread_id) { long long counters[papi_events_.size()]; GetCounters(counters, thread_id); // Add new derived stat computations here switch(stat) { case D_L1_TMR: { return 100.0 * (counters[GetEventIndex(PAPI_L1_TCM)] / (double)(counters[GetEventIndex(PAPI_SR_INS)] + counters[GetEventIndex(PAPI_LD_INS)])); } case D_L2_TMR: { return 100.0 * (counters[GetEventIndex(PAPI_L2_TCM)] / (double)counters[GetEventIndex(PAPI_L2_TCA)]); } case D_L3_TMR: { return 100.0 * (counters[GetEventIndex(PAPI_L3_TCM)] / (double)counters[GetEventIndex(PAPI_L3_TCA)]); } case D_L1_DMR: { return 100.0 * (counters[GetEventIndex(PAPI_L1_DCM)] / (double)counters[GetEventIndex(PAPI_L1_DCA)]); } case D_L2_DMR: { return 100.0 * (counters[GetEventIndex(PAPI_L2_DCM)] / (double)counters[GetEventIndex(PAPI_L2_DCA)]); } case D_L3_DMR: { return 100.0 * (counters[GetEventIndex(PAPI_L3_DCM)] / (double)counters[GetEventIndex(PAPI_L3_DCA)]); } case D_BR_MPR: { if(EventAvailable(PAPI_BR_CN)) return 100.0 * (counters[GetEventIndex(PAPI_BR_MSP)] / (double)counters[GetEventIndex(PAPI_BR_CN)]); else return 100.0 * (counters[GetEventIndex(PAPI_BR_MSP)] / (double)(counters[GetEventIndex(PAPI_BR_MSP)] + counters[GetEventIndex(PAPI_BR_PRC)])); } case D_GIPC: { return (static_cast<double>(counters[GetEventIndex(PAPI_TOT_INS)]) / counters[GetEventIndex(PAPI_TOT_CYC)]); } case D_IIPC: { return (static_cast<double>(counters[GetEventIndex(PAPI_TOT_IIS)]) / counters[GetEventIndex(PAPI_TOT_CYC)]); } case D_MFLOPS: { return (static_cast<double>(counters[GetEventIndex(PAPI_FP_INS)]) / timer_.GetAggregatedTime()) / 1e6; } case D_GFLOPS: { return (static_cast<double>(counters[GetEventIndex(PAPI_FP_INS)]) / timer_.GetAggregatedTime()) / 1e9; } case D_MIPS: { return (static_cast<double>(counters[GetEventIndex(PAPI_TOT_INS)]) / timer_.GetAggregatedTime()) / 1e6; } case D_GIPS: { return (static_cast<double>(counters[GetEventIndex(PAPI_TOT_INS)]) / timer_.GetAggregatedTime()) / 1e9; } case D_TLBM_PC: { return 
(static_cast<double>(counters[GetEventIndex(PAPI_TLB_DM)]) / timer_.GetRepetitionCount()); } case D_TLBM_PS: { return (static_cast<double>(counters[GetEventIndex(PAPI_TLB_DM)]) / timer_.GetAggregatedTime()); } } return 0; } // Add new derived stat translations here std::string Base::GetDerivedStatName(PapiDerivedStat stat) { switch(stat) { case D_L1_TMR: { return "D_L1_TMR"; } case D_L2_TMR: { return "D_L2_TMR"; } case D_L3_TMR: { return "D_L3_TMR"; } case D_L1_DMR: { return "D_L1_DMR"; } case D_L2_DMR: { return "D_L2_DMR"; } case D_L3_DMR: { return "D_L3_DMR"; } case D_BR_MPR: { return "D_BR_MPR"; } case D_GIPC: { return "D_GIPC"; } case D_IIPC: { return "D_IIPC"; } case D_MFLOPS: { return "D_MFLOPS"; } case D_GFLOPS: { return "D_GFLOPS"; } case D_MIPS: { return "D_MIPS"; } case D_GIPS: { return "D_GIPS"; } case D_TLBM_PC: { return "D_TLBM_PC"; } case D_TLBM_PS: { return "D_TLBM_PS"; } } return ""; } template < typename T > void Base::PrintResults(T value, int thread_id) { std::vector< PapiDerivedStat > stats; GetDerivedStats(stats); if((thread_id >= 0)&&(thread_id <= max_threads_)) printf("Results for thread #%d\n", thread_id); else { printf("Aggregated results on all threads\n"); thread_id = ALL_THREADS; } double t_value = static_cast<double>(value); printf("Parameter value: %f\n", t_value); long long results[papi_events_.size()]; GetCounters(results, thread_id); for(int event = 0; event < papi_events_.size(); ++event) printf("%-15s:%20d\n", papi_event_names_[event].c_str(), results[event]); for(int d_event = 0; d_event < stats.size(); ++d_event) printf("%-15s:%20f\n", GetDerivedStatName(stats[d_event]).c_str(), ComputeDerivedStat(stats[d_event], thread_id)); } template < typename T > void Base::PrintResultsToFile(T value, const char * file_name, OutputFormat format, int thread_id) { if((thread_id < 0)||(thread_id > max_threads_)) thread_id = ALL_THREADS; FILE * output; output = fopen(file_name, "a"); // Add new output format methods here switch(format) { case GNUPLOT: { PrintGnuplot(value, output, thread_id); break; } } fclose(output); } // Format specific print methods template < typename T > void Base::PrintGnuplot(T value, FILE * output, int thread_id) { std::vector< PapiDerivedStat > stats; GetDerivedStats(stats); fprintf(output, "#"); if(thread_id != ALL_THREADS) fprintf(output, "THREAD"); fprintf(output, "%15s", "VALUE"); for(int event = 0; event < papi_events_.size(); ++event) fprintf(output, "%20s", papi_event_names_[event].c_str()); for(int d_event = 0; d_event < stats.size(); ++d_event) fprintf(output, "%20s", GetDerivedStatName(stats[d_event]).c_str()); fprintf(output, "\n"); long long results[papi_events_.size()]; GetCounters(results, thread_id); if(thread_id != ALL_THREADS) fprintf(output, "%-7d", thread_id); double t_value = static_cast<double>(value); fprintf(output, "%16f", t_value); for(int event = 0; event < papi_events_.size(); ++event) fprintf(output, "%20d", results[event]); for(int d_event = 0; d_event < stats.size(); ++d_event) fprintf(output, "%20f", ComputeDerivedStat(stats[d_event], thread_id)); fprintf(output, "\n"); } }
zhesv.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> s d c * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_types.h" #include "plasma_workspace.h" #include <string.h> /***************************************************************************//** * * @ingroup plasma_hesv * * Solves a system of linear equations A * X = B with LTLt factorization. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * TODO: only support Lower for now * * @param[in] n * The order of the matrix A. n >= 0. * * @param[in] nrhs * The number of right hand sides, i.e., the number of * columns of the matrix B. nrhs >= 0. * * @param[in,out] A * Details of the LTL factorization of the Hermitian matrix A, * as computed by plasma_zhetrf. * * @param[in] lda * The leading dimension of the array A. * * @param[in,out] T * Details of the LU factorization of the band matrix A, as * computed by plasma_zgbtrf. * * @param[in] ldt * The leading dimension of the array T. * * @param[in] ipiv * The pivot indices used for zhetrf; for 1 <= i <= min(m,n), * row i of the matrix was interchanged with row ipiv(i). * * @param[in] ipiv2 * The pivot indices used for zgbtrf; for 1 <= i <= min(m,n), * row i of the matrix was interchanged with row ipiv(i). * * @param[in,out] B * On entry, the n-by-nrhs right hand side matrix B. * On exit, if return value = 0, the n-by-nrhs solution matrix X. * * @param[in] ldb * The leading dimension of the array B. ldb >= max(1,n). * ******************************************************************************* * * @retval PlasmaSuccess successful exit * @retval < 0 if -i, the i-th argument had an illegal value * ******************************************************************************* * * @sa plasma_omp_zhesv * @sa plasma_chesv * @sa plasma_dsysv * @sa plasma_ssysv * @sa plasma_zhetrf * @sa plasma_zhetrs * ******************************************************************************/ int plasma_zhesv(plasma_enum_t uplo, int n, int nrhs, plasma_complex64_t *pA, int lda, int *ipiv, plasma_complex64_t *pT, int ldt, int *ipiv2, plasma_complex64_t *pB, int ldb) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_fatal_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. if (//(uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo (Upper not supported, yet)"); return -1; } if (n < 0) { plasma_error("illegal value of n"); return -2; } if (nrhs < 0) { plasma_error("illegal value of nrhs"); return -5; } if (lda < imax(1, n)) { plasma_error("illegal value of lda"); return -7; } if (ldb < imax(1, n)) { plasma_error("illegal value of ldb"); return -10; } // quick return if (imax(n, nrhs) == 0) return PlasmaSuccess; // Set tiling parameters. int nb = plasma->nb; // Initialize barrier plasma_barrier_init(&plasma->barrier); // Initialize tile matrix descriptors. 
plasma_desc_t A; plasma_desc_t T; plasma_desc_t B; int tku = (nb+nb+nb-1)/nb; // number of tiles in upper band (not including diagonal) int tkl = (nb+nb-1)/nb; // number of tiles in lower band (not including diagonal) int lm = (tku+tkl+1)*nb; // since we use zgetrf on panel, we pivot back within panel. // this could fill the last tile of the panel, // and we need extra NB space on the bottom int retval; retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, n, n, 0, 0, n, n, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } retval = plasma_desc_general_band_create(PlasmaComplexDouble, PlasmaGeneral, nb, nb, lm, n, 0, 0, n, n, nb, nb, &T); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_band_create() failed"); return retval; } retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, n, nrhs, 0, 0, n, nrhs, &B); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); return retval; } // Create workspace. plasma_desc_t W; int ldw = (1+5*A.mt)*nb; /* block column */ retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, ldw, nb, 0, 0, ldw, nb, &W); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } // Create sequence. plasma_sequence_t *sequence = NULL; retval = plasma_sequence_create(&sequence); if (retval != PlasmaSuccess) { plasma_error("plasma_sequence_create() failed"); return retval; } // Initialize request. plasma_request_t request = PlasmaRequestInitializer; // Initialize data. memset(T.matrix, 0, ldt*n*sizeof(plasma_complex64_t)); for (int i = 0; i < nb; i++) ipiv[i] = 1+i; // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_zge2desc(pA, lda, A, sequence, &request); plasma_omp_zpb2desc(pT, ldt, T, sequence, &request); plasma_omp_zge2desc(pB, ldb, B, sequence, &request); } #pragma omp parallel #pragma omp master { // Call the tile async function. plasma_omp_zhesv(uplo, A, ipiv, T, ipiv2, B, W, sequence, &request); } #pragma omp parallel #pragma omp master { // Translate back to LAPACK layout. plasma_omp_zdesc2ge(B, pB, ldb, sequence, &request); } // implicit synchronization // Free matrix A in tile layout. plasma_desc_destroy(&A); plasma_desc_destroy(&T); plasma_desc_destroy(&B); plasma_desc_destroy(&W); // Return status. int status = sequence->status; plasma_sequence_destroy(sequence); return status; } /***************************************************************************//** * * @ingroup plasma_hesv * * Solves a system of linear equations using previously * computed factorization. * Non-blocking tile version of plasma_zhesv(). * May return before the computation is finished. * Operates on matrices stored by tiles. * All matrices are passed through descriptors. * All dimensions are taken from the descriptors. * Allows for pipelining of operations at runtime. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * * @param[in] A * The triangular factor U or L from the Cholesky factorization * A = U^H*U or A = L*L^H, computed by plasma_zpotrf. * * @param[in,out] B * On entry, the n-by-nrhs right hand side matrix B. * On exit, if return value = 0, the n-by-nrhs solution matrix X. 
* * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). Check * the sequence->status for errors. * * @param[out] request * Identifies this function call (for exception handling purposes). * * @retval void * Errors are returned by setting sequence->status and * request->status to error values. The sequence->status and * request->status should never be set to PlasmaSuccess (the * initial values) since another async call may be setting a * failure value at the same time. * ******************************************************************************* * * @sa plasma_zhesv * @sa plasma_omp_zhesv * @sa plasma_omp_chesv * @sa plasma_omp_dsysv * @sa plasma_omp_ssysv * @sa plasma_omp_zhetrf * @sa plasma_omp_zhetrs * ******************************************************************************/ void plasma_omp_zhesv(plasma_enum_t uplo, plasma_desc_t A, int *ipiv, plasma_desc_t T, int *ipiv2, plasma_desc_t B, plasma_desc_t W, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_fatal_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments. if (//(uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo (Upper not supported, yet)"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(A) != PlasmaSuccess) { plasma_error("invalid A"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(B) != PlasmaSuccess) { plasma_error("invalid B"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_fatal_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_fatal_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return if (A.n == 0 || B.n == 0) return; // Call the parallel functions. plasma_pzhetrf_aasen(uplo, A, ipiv, T, W, sequence, request); plasma_pzgbtrf(T, ipiv2, sequence, request); if (uplo == PlasmaLower) { plasma_desc_t vA; plasma_desc_t vB; // forward-substitution with L if (A.m > A.nb) { vA = plasma_desc_view(A, A.nb, 0, A.m-A.nb, A.n-A.nb); vB = plasma_desc_view(B, B.nb, 0, B.m-B.nb, B.n); plasma_pzgeswp(PlasmaRowwise, B, ipiv, 1, sequence, request); #pragma omp taskwait plasma_pztrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit, 1.0, vA, vB, sequence, request); } // solve with band matrix T #pragma omp taskwait plasma_pztbsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit, 1.0, T, B, ipiv2, sequence, request); plasma_pztbsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit, 1.0, T, B, ipiv2, sequence, request); // backward-substitution with L^H if (A.m > A.nb) { plasma_pztrsm(PlasmaLeft, PlasmaLower, PlasmaConjTrans, PlasmaUnit, 1.0, vA, vB, sequence, request); #pragma omp taskwait plasma_pzgeswp(PlasmaRowwise, B, ipiv, -1, sequence, request); } } else { // TODO: upper } }
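In the lower case, the solve applies the row permutation from ipiv, a forward substitution with the unit lower triangular factor L, a banded solve with the LU factors of T (ipiv2), a backward substitution with L^H, and the inverse permutation. The tile routines plasma_pztrsm and plasma_pztbsm do this on descriptors; the dense sketch below, with made-up 3x3 values, only illustrates the structure of the two unit-triangular substitutions and omits the band solve in between.

#include <cstdio>

int main() {
    const int n = 3;
    // Unit lower triangular factor; values are hypothetical.
    double L[3][3] = {{1, 0, 0},
                      {2, 1, 0},
                      {3, 4, 1}};
    double b[3] = {1, 2, 3};
    double y[3], x[3];

    // Forward substitution: solve L * y = b (PlasmaNoTrans step).
    for (int i = 0; i < n; ++i) {
        y[i] = b[i];
        for (int j = 0; j < i; ++j)
            y[i] -= L[i][j] * y[j];
    }

    // Backward substitution: solve L^T * x = y (PlasmaConjTrans step for real data).
    for (int i = n - 1; i >= 0; --i) {
        x[i] = y[i];
        for (int j = i + 1; j < n; ++j)
            x[i] -= L[j][i] * x[j];
    }

    printf("x = [%g, %g, %g]\n", x[0], x[1], x[2]);
    return 0;
}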
depend-3.c
#include <stdlib.h>
#include <unistd.h>

int
main ()
{
  #pragma omp parallel
  #pragma omp single
  {
    int x = 1, y = 2;
    #pragma omp taskgroup
    {
      #pragma omp task shared (x) depend(in: x)
      {
        usleep (10000);
        if (x != 1)
          abort ();
      }
      #pragma omp taskgroup
      {
        #pragma omp task shared (x) depend(in: x)
        {
          usleep (15000);
          if (x != 1)
            abort ();
        }
        #pragma omp task shared (y) depend(inout: y)
        {
          if (y != 2)
            abort ();
          y = 3;
        }
        #pragma omp taskgroup
        {
          #pragma omp task shared (x) depend(in: x)
          {
            usleep (13000);
            if (x != 1)
              abort ();
          }
          #pragma omp taskgroup
          {
            #pragma omp task shared (x) depend(out: x)
            x = 2;
          }
        }
      }
    }
  }
  return 0;
}
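The test relies on the ordering rule for sibling tasks: the tasks with depend(in: x) are created first and may run concurrently with each other, but the later sibling with depend(out: x) must wait for all of them, so the readers still observe x == 1 even though they sleep. A minimal sketch of the same in-then-out pattern (assumed compiled with OpenMP enabled, e.g. -fopenmp):

#include <stdio.h>
#include <unistd.h>

int main() {
    int x = 1;
    #pragma omp parallel
    #pragma omp single
    {
        // Reader task created first: sleeps, then reads x.
        #pragma omp task shared(x) depend(in: x)
        {
            usleep(10000);
            printf("reader sees x = %d\n", x);   // prints 1
        }
        // Writer task created later: its 'out' dependence on x makes it wait
        // until the earlier 'in' task has completed, even though that task is
        // still sleeping when this one is created.
        #pragma omp task shared(x) depend(out: x)
        x = 2;

        #pragma omp taskwait
        printf("after both tasks x = %d\n", x);  // prints 2
    }
    return 0;
}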
GB_binop__le_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__le_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__le_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__le_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__le_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__le_uint64) // A*D function (colscale): GB (_AxD__le_uint64) // D*A function (rowscale): GB (_DxB__le_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__le_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__le_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_uint64) // C=scalar+B GB (_bind1st__le_uint64) // C=scalar+B' GB (_bind1st_tran__le_uint64) // C=A+scalar GB (_bind2nd__le_uint64) // C=A'+scalar GB (_bind2nd_tran__le_uint64) // C type: bool // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LE || GxB_NO_UINT64 || GxB_NO_LE_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__le_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__le_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__le_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__le_uint64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__le_uint64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__le_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__le_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__le_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__le_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__le_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__le_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__le_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__le_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) 
{ // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__le_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
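The bind1st/bind2nd kernels simply fix one operand of the binary operator to a scalar and apply it across all stored entries, skipping entries the bitmap marks as absent. A standalone sketch of the bind2nd pattern for cij = (aij <= y), with hypothetical data and without the GraphBLAS macros:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

int main() {
    // Hypothetical entry values of A and a bound scalar y; Ab marks present entries.
    uint64_t Ax[6] = {3, 7, 5, 9, 2, 8};
    int8_t   Ab[6] = {1, 1, 0, 1, 1, 1};   // entry 2 is not present
    uint64_t y = 5;
    bool Cx[6];

    int64_t p;
    #pragma omp parallel for schedule(static)
    for (p = 0; p < 6; p++) {
        if (!Ab[p]) continue;         // skip entries absent from the bitmap
        Cx[p] = (Ax[p] <= y);         // the LE_UINT64 operator with y bound second
    }

    for (p = 0; p < 6; p++)
        if (Ab[p]) printf("Cx[%lld] = %d\n", (long long)p, (int)Cx[p]);
    return 0;
}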
simpar-omp.c
#include <omp.h> #include "../include/particles.h" cell_t ***cells_threads; int num_max_threads; cell_t **init_cells(int grid_size) { cell_t *cells_chunk = (cell_t *)calloc(grid_size * grid_size * num_max_threads, sizeof(cell_t)); cell_t **cells_rows = (cell_t **)malloc(sizeof(cell_t *) * grid_size * num_max_threads); cells_threads = (cell_t ***)malloc(sizeof(cell_t **) * num_max_threads); for (int i = 0; i < num_max_threads; i++) { cells_threads[i] = &cells_rows[i * grid_size]; for (int j = 0; j < grid_size; j++) { cells_threads[i][j] = &cells_chunk[(i * grid_size * grid_size) + (j * grid_size)]; } } // Allocate Adjacent Cells Memory cell_t **adjacent_cells_chunk = (cell_t **)malloc(sizeof(cell_t *) * grid_size * grid_size * ADJACENT_CELLS_NUMBER); cell_t ***adjacent_cells_rows = (cell_t ***)malloc(sizeof(cell_t **) * grid_size * grid_size); adjacent_cells = (cell_t ****)malloc(sizeof(cell_t ***) * grid_size); // Initialize Adjacent Cells Memory for (int i = 0; i < grid_size; i++) { adjacent_cells[i] = &adjacent_cells_rows[i * grid_size]; for (int j = 0; j < grid_size; j++) { adjacent_cells[i][j] = &adjacent_cells_chunk[(i * grid_size + j) * ADJACENT_CELLS_NUMBER]; int index_adjacent_cells = 0; cell_t **adjacent_cell = adjacent_cells[i][j]; for (int x = -1; x <= 1; x++) { for (int y = -1; y <= 1; y++) { adjacent_cell[index_adjacent_cells] = &cells_threads[0][(i + x + grid_size) % grid_size][(j + y + grid_size) % grid_size]; index_adjacent_cells++; } } } } return cells_threads[0]; } void calculate_centers_of_mass(particle_t *particles, cell_t **cells, int grid_size, int number_particles) { #pragma omp parallel { int num_threads = omp_get_num_threads(); int thread_num = omp_get_thread_num(); #pragma omp for for (int i = 0; i < number_particles; i++) { particle_t *particle = &particles[i]; cell_t *cell = &cells_threads[thread_num][particle->cell.x][particle->cell.y]; cell->mass_sum += particle->mass; cell->center_of_mass.x += particle->mass * particle->position.x; cell->center_of_mass.y += particle->mass * particle->position.y; } #pragma omp for for (int i = 0; i < grid_size; i++) { for (int j = 0; j < grid_size; j++) { cell_t *cell = &cells[i][j]; for (int k = 1; k < num_threads; k++) { cell_t *thread_cell = &cells_threads[k][i][j]; cell->mass_sum += thread_cell->mass_sum; cell->center_of_mass.x += thread_cell->center_of_mass.x; cell->center_of_mass.y += thread_cell->center_of_mass.y; } if (cell->mass_sum != 0) { cell->center_of_mass.x /= cell->mass_sum; cell->center_of_mass.y /= cell->mass_sum; } } } } } void calculate_new_iteration(particle_t *particles, cell_t **cells, int grid_size, int number_particles) { #pragma omp parallel for for (int i = 0; i < number_particles; i++) { particle_t *particle = &particles[i]; coordinate_t force = {0}, acceleration = {0}; cell_t **particle_adjacent_cell = adjacent_cells[particle->cell.x][particle->cell.y]; // Calculate force for (int i = 0; i < ADJACENT_CELLS_NUMBER; i++) { cell_t *adjacent_cell = particle_adjacent_cell[i]; coordinate_t force_a_b; force_a_b.x = adjacent_cell->center_of_mass.x - particle->position.x; force_a_b.y = adjacent_cell->center_of_mass.y - particle->position.y; double distance_squared = force_a_b.x * force_a_b.x + force_a_b.y * force_a_b.y; if (distance_squared < EPSLON * EPSLON) { continue; } double scalar_force = G * particle->mass * adjacent_cell->mass_sum / (distance_squared * sqrt(distance_squared)); force.x += force_a_b.x * scalar_force; force.y += force_a_b.y * scalar_force; } // Calculate acceleration acceleration.x = 
force.x / particle->mass; acceleration.y = force.y / particle->mass; // Calculate new position particle->position.x += particle->velocity.x + acceleration.x * 0.5 + 1; particle->position.y += particle->velocity.y + acceleration.y * 0.5 + 1; particle->position.x -= (int)particle->position.x; particle->position.y -= (int)particle->position.y; // Calculate new velocity particle->velocity.x += acceleration.x; particle->velocity.y += acceleration.y; // Calculate new cell position particle->cell.x = particle->position.x * grid_size; particle->cell.y = particle->position.y * grid_size; } } coordinate_t calculate_overall_center_of_mass(particle_t *particles, int number_particles) { double center_of_mass_x = 0, center_of_mass_y = 0; double total_mass = 0; #pragma omp parallel for reduction(+ : total_mass, center_of_mass_x, center_of_mass_y) for (int i = 0; i < number_particles; i++) { particle_t *particle = &particles[i]; total_mass += particle->mass; center_of_mass_x += particle->mass * particle->position.x; center_of_mass_y += particle->mass * particle->position.y; } center_of_mass_x /= total_mass; center_of_mass_y /= total_mass; return (coordinate_t){center_of_mass_x, center_of_mass_y}; } int main(int argc, const char **argv) { if (argc != 5) { printf("Expected 5 arguments, but %d were given\n", argc); exit(1); } int grid_size = atoi(argv[2]); int number_particles = atoi(argv[3]); int n_time_steps = atoi(argv[4]); num_max_threads = omp_get_max_threads(); particle_t *particles; cell_t **cells; // Initialize particles = init_particles(atoi(argv[1]), grid_size, number_particles); cells = init_cells(grid_size); for (int n = 0; n < n_time_steps; n++) { calculate_centers_of_mass(particles, cells, grid_size, number_particles); calculate_new_iteration(particles, cells, grid_size, number_particles); memset(cells[0], 0, sizeof(cell_t) * grid_size * grid_size * num_max_threads); } coordinate_t center_of_mass = calculate_overall_center_of_mass(particles, number_particles); printf("%.2f %.2f\n", particles->position.x, particles->position.y); printf("%.2f %.2f\n", center_of_mass.x, center_of_mass.y); // Free resources free(particles); free(cells_threads[0][0]); free(cells_threads[0]); free(cells_threads); free(adjacent_cells[0][0]); free(adjacent_cells[0]); free(adjacent_cells); return 0; }
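calculate_centers_of_mass avoids atomics by giving every thread its own copy of the grid (cells_threads[thread]) in a first parallel loop and summing the per-thread copies into the shared grid in a second one. The same two-phase pattern on a plain histogram, as a minimal sketch with made-up sizes:

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main() {
    const int nbins = 8, n = 100000;
    int nthreads = omp_get_max_threads();

    // One private histogram per thread, analogous to cells_threads[t][i][j].
    long *hist_threads = (long *)calloc((size_t)nthreads * nbins, sizeof(long));
    long hist[8] = {0};

    #pragma omp parallel
    {
        int t = omp_get_thread_num();
        long *mine = &hist_threads[(size_t)t * nbins];

        // Phase 1: each thread accumulates into its own copy, no atomics needed.
        #pragma omp for
        for (int i = 0; i < n; i++)
            mine[i % nbins]++;

        // Implicit barrier here, then phase 2: merge the per-thread copies.
        #pragma omp for
        for (int b = 0; b < nbins; b++)
            for (int k = 0; k < nthreads; k++)
                hist[b] += hist_threads[(size_t)k * nbins + b];
    }

    for (int b = 0; b < nbins; b++)
        printf("bin %d: %ld\n", b, hist[b]);
    free(hist_threads);
    return 0;
}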
core_ssyrk.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zsyrk.c, normal z -> s, Fri Sep 28 17:38:23 2018 * **/ #include <plasma_core_blas.h> #include "plasma_types.h" #include "core_lapack.h" /***************************************************************************//** * * @ingroup core_syrk * * Performs one of the symmetric rank k operations * * \f[ C = \alpha A \times A^T + \beta C, \f] * or * \f[ C = \alpha A^T \times A + \beta C, \f] * * where alpha and beta are scalars, C is an n-by-n symmetric * matrix, and A is an n-by-k matrix in the first case and a k-by-n * matrix in the second case. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of C is stored; * - PlasmaLower: Lower triangle of C is stored. * * @param[in] trans * - PlasmaNoTrans: \f[ C = \alpha A \times A^T + \beta C; \f] * - PlasmaTrans: \f[ C = \alpha A^T \times A + \beta C. \f] * * @param[in] n * The order of the matrix C. n >= 0. * * @param[in] k * If trans = PlasmaNoTrans, number of columns of the A matrix; * if trans = PlasmaTrans, number of rows of the A matrix. * * @param[in] alpha * The scalar alpha. * * @param[in] A * A is an lda-by-ka matrix. * If trans = PlasmaNoTrans, ka = k; * if trans = PlasmaTrans, ka = n. * * @param[in] lda * The leading dimension of the array A. * If trans = PlasmaNoTrans, lda >= max(1, n); * if trans = PlasmaTrans, lda >= max(1, k). * * @param[in] beta * The scalar beta. * * @param[in,out] C * C is an ldc-by-n matrix. * On exit, the uplo part of the matrix is overwritten * by the uplo part of the updated matrix. * * @param[in] ldc * The leading dimension of the array C. ldc >= max(1, n). * ******************************************************************************/ __attribute__((weak)) void plasma_core_ssyrk(plasma_enum_t uplo, plasma_enum_t trans, int n, int k, float alpha, const float *A, int lda, float beta, float *C, int ldc) { cblas_ssyrk(CblasColMajor, (CBLAS_UPLO)uplo, (CBLAS_TRANSPOSE)trans, n, k, (alpha), A, lda, (beta), C, ldc); } /******************************************************************************/ void plasma_core_omp_ssyrk( plasma_enum_t uplo, plasma_enum_t trans, int n, int k, float alpha, const float *A, int lda, float beta, float *C, int ldc, plasma_sequence_t *sequence, plasma_request_t *request) { int ak; if (trans == PlasmaNoTrans) ak = k; else ak = n; #pragma omp task depend(in:A[0:lda*ak]) \ depend(inout:C[0:ldc*n]) { if (sequence->status == PlasmaSuccess) plasma_core_ssyrk(uplo, trans, n, k, alpha, A, lda, beta, C, ldc); } }
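plasma_core_ssyrk is a thin wrapper around cblas_ssyrk, and the task variant expresses its dataflow through depend(in: A[...]) / depend(inout: C[...]) so the runtime can order tile operations. A minimal direct CBLAS call on a 2-by-3 A in column-major layout (assuming a CBLAS implementation such as OpenBLAS providing cblas.h is linked) computes C = alpha*A*A^T + beta*C on the lower triangle:

#include <stdio.h>
#include <cblas.h>

int main() {
    // A is n-by-k (2-by-3) in column-major layout, as used by PLASMA.
    const int n = 2, k = 3;
    float A[6] = {1, 2,    // column 0
                  3, 4,    // column 1
                  5, 6};   // column 2
    float C[4] = {0, 0, 0, 0};   // n-by-n, only the lower triangle is updated

    // C = 1.0 * A * A^T + 0.0 * C, lower triangle only.
    cblas_ssyrk(CblasColMajor, CblasLower, CblasNoTrans,
                n, k, 1.0f, A, n, 0.0f, C, n);

    // Expected lower triangle: C(0,0)=35, C(1,0)=44, C(1,1)=56.
    printf("C(0,0)=%g C(1,0)=%g C(1,1)=%g\n", C[0], C[1], C[3]);
    return 0;
}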
adjmapbqm.h
// Copyright 2020 D-Wave Systems Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef DIMOD_ADJMAPBQM_H_ #define DIMOD_ADJMAPBQM_H_ #include <map> #include <utility> #include <vector> #include "dimod/utils.h" namespace dimod { template<class V, class B> class AdjMapBQM { public: using bias_type = B; using variable_type = V; using size_type = std::size_t; using outvars_iterator = typename std::map<V, B>::iterator; using const_outvars_iterator = typename std::map<V, B>::const_iterator; // in the future we'd probably like to make this protected std::vector<std::pair<std::map<V, B>, B>> adj; AdjMapBQM() {} template<class BQM> explicit AdjMapBQM(const BQM &bqm) { adj.resize(bqm.num_variables()); for (variable_type v = 0; v < bqm.num_variables(); ++v) { linear(v) = bqm.linear(v); auto span = bqm.neighborhood(v); adj[v].first.insert(span.first, span.second); } } /** * Construct a BQM from a dense array. * * @param dense An array containing the biases. Assumed to contain * `num_variables`^2 elements. The upper and lower triangle are summed. * @param num_variables The number of variables. */ template<class B2> AdjMapBQM(const B2 dense[], size_type num_variables, bool ignore_diagonal = false) { // we know how big our linear is going to be adj.resize(num_variables); bias_type qbias; if (!ignore_diagonal) { for (size_type v = 0; v < num_variables; ++v) { adj[v].second = dense[v*(num_variables+1)]; } } for (size_type u = 0; u < num_variables; ++u) { for (size_type v = u + 1; v < num_variables; ++v) { qbias = dense[u*num_variables+v] + dense[v*num_variables+u]; if (qbias != 0) { adj[u].first.emplace_hint(adj[u].first.end(), v, qbias); adj[v].first.emplace_hint(adj[v].first.end(), u, qbias); } } } } /** * Construct a BQM from a dense array. This constructor is parallelized * and temporarily zeroes out the diagonal of the dense array but restores * it back. * * @param dense An array containing the biases. Assumed to contain * `num_variables`^2 elements. The upper and lower triangle are summed. * @param num_variables The number of variables. */ template <class B2> AdjMapBQM(B2 dense[], size_type num_variables, bool ignore_diagonal = false) { // we know how big our linear is going to be adj.resize(num_variables); // Backup copy of the diagonal of the dense matrix. std::vector<B2> dense_diagonal(num_variables); if (!ignore_diagonal) { #pragma omp parallel for for (size_type v = 0; v < num_variables; ++v) { adj[v].second = dense[v * (num_variables + 1)]; } } #pragma omp parallel { // Zero out the diagonal to avoid expensive checks inside innermost // loop in the code for reading the matrix. The diagonal will be // restored so a backup copy is saved. 
#pragma omp for schedule(static) for (size_type v = 0; v < num_variables; ++v) { dense_diagonal[v] = dense[v * (num_variables + 1)]; dense[v * (num_variables + 1)] = 0; } size_type counters[BLOCK_SIZE] = {0}; size_type buffer_size = num_variables * BLOCK_SIZE * sizeof(std::pair<variable_type, bias_type>); std::pair<variable_type, bias_type> *temp_buffer = (std::pair<variable_type, bias_type> *)malloc(buffer_size); if (temp_buffer == NULL) { printf("Memory allocation failure.\n"); exit(0); } // We process the matrix in blocks of size BLOCK_SIZE*BLOCK_SIZE to take // advantage of cache locality. Dynamic scheduling is used as we know some // blocks may be more sparse than others and processing them may finish earlier. #pragma omp for schedule(dynamic) for (size_type u_st = 0; u_st < num_variables; u_st += BLOCK_SIZE) { size_type u_end = std::min(u_st + BLOCK_SIZE, num_variables); for (size_type v_st = 0; v_st < num_variables; v_st += BLOCK_SIZE) { size_type v_end = std::min(v_st + BLOCK_SIZE, num_variables); for (size_type u = u_st, n = 0; u < u_end; u++, n++) { size_type counter_u = counters[n]; size_type counter_u_old = counter_u; for (size_type v = v_st; v < v_end; v++) { bias_type qbias = dense[u * num_variables + v] + dense[v * num_variables + u]; if (qbias != 0) { // Even though an intermediate buffer is not needed in case of this // model of bqm, since we cannot preallocate a map using the number // of elements in the buffer, inserting into the map directly here // nullifies the benefits of cache blocking due to reallocation of // the map causing cache pollution. temp_buffer[n * num_variables + counter_u++] = {v, qbias}; } } if (counter_u != counter_u_old) { counters[n] = counter_u; } } } for (size_type n = 0; n < BLOCK_SIZE; n++) { if (counters[n]) { std::copy(temp_buffer + n * num_variables, temp_buffer + n * num_variables + counters[n], std::inserter(adj[u_st + n].first, adj[u_st +n].first.begin())); counters[n] = 0; } } } free(temp_buffer); // Restore the diagonal of the original dense matrix #pragma omp for schedule(static) for (size_type v = 0; v < num_variables; ++v) { dense[v * (num_variables + 1)] = dense_diagonal[v]; } } } /// Add one (disconnected) variable to the BQM and return its index. variable_type add_variable() { adj.resize(adj.size()+1); return adj.size()-1; } /// Get the degree of variable `v`. size_type degree(variable_type v) const { return adj[v].first.size(); } [[deprecated("Use AdjMapBQM::linear(v)")]] bias_type get_linear(variable_type v) const { return linear(v); } std::pair<bias_type, bool> get_quadratic(variable_type u, variable_type v) const { assert(u >= 0 && u < adj.size()); assert(v >= 0 && v < adj.size()); assert(u != v); auto it = adj[u].first.find(v); if (it == adj[u].first.end() || it->first != v) return std::make_pair(0, false); return std::make_pair(it->second, true); } bias_type& linear(variable_type v) { assert(v >= 0 && v < adj.size()); return adj[v].second; } const bias_type& linear(variable_type v) const { assert(v >= 0 && v < adj.size()); return adj[v].second; } std::pair<outvars_iterator, outvars_iterator> neighborhood(variable_type u) { assert(u >= 0 && u < adj.size()); return std::make_pair(adj[u].first.begin(), adj[u].first.end()); } std::pair<const_outvars_iterator, const_outvars_iterator> neighborhood(variable_type u) const { assert(u >= 0 && u < adj.size()); return std::make_pair(adj[u].first.cbegin(), adj[u].first.cend()); } /** * The neighborhood of variable `v`. * * @param A variable `v`. 
* @param The neighborhood will start with the first out variable that * does not compare less than `start`. * * @returns A pair of iterators pointing to the start and end of the * neighborhood. */ std::pair<const_outvars_iterator, const_outvars_iterator> neighborhood(variable_type v, variable_type start) const { return std::make_pair(adj[v].first.lower_bound(start), adj[v].first.cend()); } size_type num_variables() const { return adj.size(); } size_type num_interactions() const { size_type count = 0; for (auto it = adj.begin(); it != adj.end(); ++it) count += it->first.size(); return count / 2; } variable_type pop_variable() { assert(adj.size() > 0); variable_type v = adj.size() - 1; // remove v from all of its neighbor's neighborhoods for (auto it = adj[v].first.cbegin(); it != adj[v].first.cend(); ++it) adj[it->first].first.erase(v); adj.pop_back(); return adj.size(); } bool remove_interaction(variable_type u, variable_type v) { assert(u >= 0 && u < adj.size()); assert(v >= 0 && v < adj.size()); if (adj[u].first.erase(v) > 0) { adj[v].first.erase(u); return true; } return false; } [[deprecated("Use AdjMapBQM::linear(v)")]] void set_linear(variable_type v, bias_type b) { assert(v >= 0 && v < adj.size()); linear(v) = b; } bool set_quadratic(variable_type u, variable_type v, bias_type b) { assert(u >= 0 && u < adj.size()); assert(v >= 0 && v < adj.size()); assert(u != v); adj[u].first[v] = b; adj[v].first[u] = b; // to be consistent with AdjArrayBQM, we return whether the value was // set return true; } }; } // namespace dimod #endif // DIMOD_ADJMAPBQM_H_
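A minimal usage sketch of the class above: building a 3-variable BQM from a dense row-major array (the diagonal becomes the linear biases, symmetric off-diagonal entries are summed) and reading the biases back. The include path "dimod/adjmapbqm.h" is an assumption; passing a const pointer selects the sequential constructor rather than the OpenMP cache-blocked one.

#include <cstdio>
#include "dimod/adjmapbqm.h"

int main() {
    // 3x3 dense array; diagonal -> linear biases, (u,v) and (v,u) are summed.
    const double dense[9] = { 1.0, 0.5,  0.0,
                              0.5, 2.0, -1.0,
                              0.0, 0.0,  3.0 };

    dimod::AdjMapBQM<int, double> bqm(dense, 3);

    printf("num_variables = %zu\n", bqm.num_variables());
    printf("linear(1) = %g\n", bqm.linear(1));        // 2.0
    auto q01 = bqm.get_quadratic(0, 1);               // 0.5 + 0.5 = 1.0
    auto q12 = bqm.get_quadratic(1, 2);               // -1.0 + 0.0 = -1.0
    printf("J(0,1) = %g (present=%d)\n", q01.first, (int)q01.second);
    printf("J(1,2) = %g (present=%d)\n", q12.first, (int)q12.second);
    return 0;
}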
par_add_cycle.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * ParAMG cycling routine * *****************************************************************************/ #include "_hypre_parcsr_ls.h" #include "par_amg.h" /*-------------------------------------------------------------------------- * hypre_BoomerAMGCycle *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGAdditiveCycle( void *amg_vdata) { hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata; /* Data Structure variables */ hypre_ParCSRMatrix **A_array; hypre_ParCSRMatrix **P_array; hypre_ParCSRMatrix **R_array; hypre_ParCSRMatrix *Lambda; hypre_ParCSRMatrix *Atilde; hypre_ParVector **F_array; hypre_ParVector **U_array; hypre_ParVector *Vtemp; hypre_ParVector *Ztemp; hypre_ParVector *Xtilde, *Rtilde; HYPRE_Int **CF_marker_array; HYPRE_Int num_levels; HYPRE_Int addlvl, add_end; HYPRE_Int additive; HYPRE_Int mult_additive; HYPRE_Int simple; HYPRE_Int add_last_lvl; HYPRE_Int i, j, num_rows; HYPRE_Int n_global; HYPRE_Int rlx_order; /* Local variables */ HYPRE_Int Solve_err_flag = 0; HYPRE_Int level; HYPRE_Int coarse_grid; HYPRE_Int fine_grid; HYPRE_Int rlx_down; HYPRE_Int rlx_up; HYPRE_Int rlx_coarse; HYPRE_Int *grid_relax_type; HYPRE_Int *num_grid_sweeps; HYPRE_Real **l1_norms; HYPRE_Real alpha, beta; HYPRE_Real *u_data; HYPRE_Real *v_data; HYPRE_Real *l1_norms_lvl; HYPRE_Real *D_inv; HYPRE_Real *x_global; HYPRE_Real *r_global; HYPRE_Real *relax_weight; HYPRE_Real *omega; #if 0 HYPRE_Real *D_mat; HYPRE_Real *S_vec; #endif /* Acquire data and allocate storage */ A_array = hypre_ParAMGDataAArray(amg_data); F_array = hypre_ParAMGDataFArray(amg_data); U_array = hypre_ParAMGDataUArray(amg_data); P_array = hypre_ParAMGDataPArray(amg_data); R_array = hypre_ParAMGDataRArray(amg_data); CF_marker_array = hypre_ParAMGDataCFMarkerArray(amg_data); Vtemp = hypre_ParAMGDataVtemp(amg_data); Ztemp = hypre_ParAMGDataZtemp(amg_data); num_levels = hypre_ParAMGDataNumLevels(amg_data); additive = hypre_ParAMGDataAdditive(amg_data); mult_additive = hypre_ParAMGDataMultAdditive(amg_data); simple = hypre_ParAMGDataSimple(amg_data); add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data); grid_relax_type = hypre_ParAMGDataGridRelaxType(amg_data); Lambda = hypre_ParAMGDataLambda(amg_data); Atilde = hypre_ParAMGDataAtilde(amg_data); Xtilde = hypre_ParAMGDataXtilde(amg_data); Rtilde = hypre_ParAMGDataRtilde(amg_data); l1_norms = hypre_ParAMGDataL1Norms(amg_data); D_inv = hypre_ParAMGDataDinv(amg_data); relax_weight = hypre_ParAMGDataRelaxWeight(amg_data); omega = hypre_ParAMGDataOmega(amg_data); rlx_order = hypre_ParAMGDataRelaxOrder(amg_data); num_grid_sweeps = hypre_ParAMGDataNumGridSweeps(amg_data); /* Initialize */ addlvl = hypre_max(additive, mult_additive); addlvl = hypre_max(addlvl, simple); if (add_last_lvl == -1 ) add_end = num_levels-1; else add_end = add_last_lvl; Solve_err_flag = 0; /*--------------------------------------------------------------------- * Main loop of cycling --- multiplicative version --- V-cycle *--------------------------------------------------------------------*/ /* down cycle */ rlx_down = 
grid_relax_type[1]; rlx_up = grid_relax_type[2]; rlx_coarse = grid_relax_type[3]; for (level = 0; level < num_levels-1; level++) { fine_grid = level; coarse_grid = level + 1; u_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[fine_grid])); v_data = hypre_VectorData(hypre_ParVectorLocalVector(Vtemp)); l1_norms_lvl = l1_norms[level]; hypre_ParVectorSetConstantValues(U_array[coarse_grid], 0.0); if (level < addlvl || level > add_end) /* multiplicative version */ { /* smoothing step */ if (rlx_down == 0) { HYPRE_Real *A_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A_array[fine_grid])); HYPRE_Int *A_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A_array[fine_grid])); num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_array[fine_grid])); for (j=0; j < num_grid_sweeps[1]; j++) { hypre_ParVectorCopy(F_array[fine_grid],Vtemp); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows; i++) u_data[i] = relax_weight[level]*v_data[i] / A_data[A_i[i]]; } } else if (rlx_down != 18) { /*hypre_BoomerAMGRelax(A_array[fine_grid],F_array[fine_grid],NULL,rlx_down,0,*/ for (j=0; j < num_grid_sweeps[1]; j++) { hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid], CF_marker_array[fine_grid], rlx_down,rlx_order,1, relax_weight[fine_grid], omega[fine_grid], l1_norms[level], U_array[fine_grid], Vtemp, Ztemp); hypre_ParVectorCopy(F_array[fine_grid],Vtemp); } } else { num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_array[fine_grid])); for (j=0; j < num_grid_sweeps[1]; j++) { hypre_ParVectorCopy(F_array[fine_grid],Vtemp); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows; i++) u_data[i] += v_data[i] / l1_norms_lvl[i]; } } alpha = -1.0; beta = 1.0; hypre_ParCSRMatrixMatvec(alpha, A_array[fine_grid], U_array[fine_grid], beta, Vtemp); alpha = 1.0; beta = 0.0; hypre_ParCSRMatrixMatvecT(alpha,R_array[fine_grid],Vtemp, beta,F_array[coarse_grid]); } else /* additive version */ { hypre_ParVectorCopy(F_array[fine_grid],Vtemp); if (level == 0) /* compute residual */ { hypre_ParVectorCopy(Vtemp, Rtilde); hypre_ParVectorCopy(U_array[fine_grid],Xtilde); } alpha = 1.0; beta = 0.0; hypre_ParCSRMatrixMatvecT(alpha,R_array[fine_grid],Vtemp, beta,F_array[coarse_grid]); } } /* additive smoothing and solve coarse grid */ if (addlvl < num_levels) { if (simple > -1) { x_global = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde)); r_global = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde)); n_global = hypre_VectorSize(hypre_ParVectorLocalVector(Xtilde)); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < n_global; i++) x_global[i] += D_inv[i]*r_global[i]; } else { if (num_grid_sweeps[1] > 1) { n_global = hypre_VectorSize(hypre_ParVectorLocalVector(Rtilde)); hypre_ParVector *Tmptilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST); hypre_Vector *Tmptilde_local = hypre_SeqVectorCreate(n_global); hypre_SeqVectorInitialize(Tmptilde_local); hypre_ParVectorLocalVector(Tmptilde) = Tmptilde_local; hypre_ParVectorOwnsData(Tmptilde) = 1; hypre_ParCSRMatrixMatvec(1.0, Lambda, Rtilde, 0.0, Tmptilde); hypre_ParVectorScale(2.0,Rtilde); hypre_ParCSRMatrixMatvec(-1.0, Atilde, Tmptilde, 1.0, Rtilde); hypre_ParVectorDestroy(Tmptilde); } hypre_ParCSRMatrixMatvec(1.0, Lambda, Rtilde, 1.0, Xtilde); } if (addlvl == 0) hypre_ParVectorCopy(Xtilde, U_array[0]); } if (add_end < num_levels -1) { fine_grid = num_levels -1; for (j=0; j < 
num_grid_sweeps[3]; j++) if (rlx_coarse == 18) hypre_ParCSRRelax(A_array[fine_grid], F_array[fine_grid], 1, 1, l1_norms[fine_grid], 1.0, 1.0 ,0,0,0,0, U_array[fine_grid], Vtemp, Ztemp); else hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid], NULL, rlx_coarse,0,0, relax_weight[fine_grid], omega[fine_grid], l1_norms[fine_grid], U_array[fine_grid], Vtemp, Ztemp); } /* up cycle */ for (level = num_levels-1; level > 0; level--) { fine_grid = level - 1; coarse_grid = level; if (level <= addlvl || level > add_end+1) /* multiplicative version */ { alpha = 1.0; beta = 1.0; hypre_ParCSRMatrixMatvec(alpha, P_array[fine_grid], U_array[coarse_grid], beta, U_array[fine_grid]); if (rlx_up != 18) /*hypre_BoomerAMGRelax(A_array[fine_grid],F_array[fine_grid],NULL,rlx_up,0,*/ for (j=0; j < num_grid_sweeps[2]; j++) hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid], CF_marker_array[fine_grid], rlx_up,rlx_order,2, relax_weight[fine_grid], omega[fine_grid], l1_norms[fine_grid], U_array[fine_grid], Vtemp, Ztemp); else if (rlx_order) { HYPRE_Int loc_relax_points[2]; loc_relax_points[0] = -1; loc_relax_points[1] = 1; for (j=0; j < num_grid_sweeps[2]; j++) for (i=0; i < 2; i++) hypre_ParCSRRelax_L1_Jacobi(A_array[fine_grid],F_array[fine_grid], CF_marker_array[fine_grid], loc_relax_points[i], 1.0, l1_norms[fine_grid], U_array[fine_grid], Vtemp); } else for (j=0; j < num_grid_sweeps[2]; j++) hypre_ParCSRRelax(A_array[fine_grid], F_array[fine_grid], 1, 1, l1_norms[fine_grid], 1.0, 1.0 ,0,0,0,0, U_array[fine_grid], Vtemp, Ztemp); } else /* additive version */ { alpha = 1.0; beta = 1.0; hypre_ParCSRMatrixMatvec(alpha, P_array[fine_grid], U_array[coarse_grid], beta, U_array[fine_grid]); } } return(Solve_err_flag); } HYPRE_Int hypre_CreateLambda(void *amg_vdata) { hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata; /* Data Structure variables */ MPI_Comm comm; hypre_ParCSRMatrix **A_array; hypre_ParVector **F_array; hypre_ParVector **U_array; hypre_ParCSRMatrix *A_tmp; hypre_ParCSRMatrix *Lambda; hypre_CSRMatrix *L_diag; hypre_CSRMatrix *L_offd; hypre_ParCSRMatrix *Atilde; hypre_CSRMatrix *Atilde_diag; hypre_CSRMatrix *Atilde_offd; HYPRE_Real *Atilde_diag_data; HYPRE_Real *Atilde_offd_data; hypre_CSRMatrix *A_tmp_diag; hypre_CSRMatrix *A_tmp_offd; hypre_ParVector *Xtilde; hypre_ParVector *Rtilde; hypre_Vector *Xtilde_local; hypre_Vector *Rtilde_local; hypre_ParCSRCommPkg *comm_pkg; hypre_ParCSRCommPkg *L_comm_pkg = NULL; hypre_ParCSRCommHandle *comm_handle; HYPRE_Real *L_diag_data; HYPRE_Real *L_offd_data; HYPRE_Real *buf_data = NULL; HYPRE_Real *tmp_data; HYPRE_Real *x_data; HYPRE_Real *r_data; HYPRE_Real *l1_norms; HYPRE_Real *A_tmp_diag_data; HYPRE_Real *A_tmp_offd_data; HYPRE_Real *D_data = NULL; HYPRE_Real *D_data_offd = NULL; HYPRE_Int *L_diag_i; HYPRE_Int *L_diag_j; HYPRE_Int *L_offd_i; HYPRE_Int *L_offd_j; HYPRE_Int *Atilde_diag_i; HYPRE_Int *Atilde_diag_j; HYPRE_Int *Atilde_offd_i; HYPRE_Int *Atilde_offd_j; HYPRE_Int *A_tmp_diag_i; HYPRE_Int *A_tmp_offd_i; HYPRE_Int *A_tmp_diag_j; HYPRE_Int *A_tmp_offd_j; HYPRE_Int *L_recv_ptr = NULL; HYPRE_Int *L_send_ptr = NULL; HYPRE_Int *L_recv_procs = NULL; HYPRE_Int *L_send_procs = NULL; HYPRE_Int *L_send_map_elmts = NULL; HYPRE_Int *recv_procs; HYPRE_Int *send_procs; HYPRE_Int *send_map_elmts; HYPRE_Int *send_map_starts; HYPRE_Int *recv_vec_starts; HYPRE_Int *all_send_procs = NULL; HYPRE_Int *all_recv_procs = NULL; HYPRE_Int *remap = NULL; HYPRE_Int *level_start; HYPRE_Int addlvl; HYPRE_Int additive; HYPRE_Int mult_additive; HYPRE_Int 
num_levels; HYPRE_Int num_add_lvls; HYPRE_Int num_procs; HYPRE_Int num_sends, num_recvs; HYPRE_Int num_sends_L = 0; HYPRE_Int num_recvs_L = 0; HYPRE_Int send_data_L = 0; HYPRE_Int num_rows_L = 0; HYPRE_Int num_rows_tmp = 0; HYPRE_Int num_cols_offd_L = 0; HYPRE_Int num_cols_offd = 0; HYPRE_Int level, i, j, k; HYPRE_Int this_proc, cnt, cnt_diag, cnt_offd; HYPRE_Int A_cnt_diag, A_cnt_offd; HYPRE_Int cnt_recv, cnt_send, cnt_row, row_start; HYPRE_Int start_diag, start_offd, indx, cnt_map; HYPRE_Int start, j_indx, index, cnt_level; HYPRE_Int max_sends, max_recvs; HYPRE_Int ns; /* Local variables */ HYPRE_Int Solve_err_flag = 0; HYPRE_Int num_nonzeros_diag; HYPRE_Int num_nonzeros_offd; HYPRE_Real **l1_norms_ptr = NULL; /*HYPRE_Real *relax_weight = NULL; HYPRE_Int relax_type; */ HYPRE_Int add_rlx; HYPRE_Int add_last_lvl, add_end; HYPRE_Real add_rlx_wt; /* Acquire data and allocate storage */ A_array = hypre_ParAMGDataAArray(amg_data); F_array = hypre_ParAMGDataFArray(amg_data); U_array = hypre_ParAMGDataUArray(amg_data); additive = hypre_ParAMGDataAdditive(amg_data); mult_additive = hypre_ParAMGDataMultAdditive(amg_data); add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data); num_levels = hypre_ParAMGDataNumLevels(amg_data); /*relax_weight = hypre_ParAMGDataRelaxWeight(amg_data); relax_type = hypre_ParAMGDataGridRelaxType(amg_data)[1];*/ comm = hypre_ParCSRMatrixComm(A_array[0]); add_rlx = hypre_ParAMGDataAddRelaxType(amg_data); add_rlx_wt = hypre_ParAMGDataAddRelaxWt(amg_data); ns = hypre_ParAMGDataNumGridSweeps(amg_data)[1]; hypre_MPI_Comm_size(comm,&num_procs); l1_norms_ptr = hypre_ParAMGDataL1Norms(amg_data); addlvl = hypre_max(additive, mult_additive); if (add_last_lvl != -1) add_end = add_last_lvl+1; else add_end = num_levels; num_add_lvls = add_end+1-addlvl; level_start = hypre_CTAlloc(HYPRE_Int, num_add_lvls+1, HYPRE_MEMORY_HOST); send_data_L = 0; num_rows_L = 0; num_cols_offd_L = 0; num_nonzeros_diag = 0; num_nonzeros_offd = 0; level_start[0] = 0; cnt = 1; max_sends = 0; max_recvs = 0; for (i=addlvl; i < add_end; i++) { A_tmp = A_array[i]; A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp); A_tmp_offd = hypre_ParCSRMatrixOffd(A_tmp); A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag); A_tmp_offd_i = hypre_CSRMatrixI(A_tmp_offd); num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag); num_cols_offd = hypre_CSRMatrixNumCols(A_tmp_offd); num_rows_L += num_rows_tmp; level_start[cnt] = level_start[cnt-1] + num_rows_tmp; cnt++; num_cols_offd_L += num_cols_offd; num_nonzeros_diag += A_tmp_diag_i[num_rows_tmp]; num_nonzeros_offd += A_tmp_offd_i[num_rows_tmp]; comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp); if (comm_pkg) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); max_sends += num_sends; if (num_sends) send_data_L += hypre_ParCSRCommPkgSendMapStart(comm_pkg,num_sends); max_recvs += hypre_ParCSRCommPkgNumRecvs(comm_pkg); } } if (max_sends >= num_procs ||max_recvs >= num_procs) { max_sends = num_procs; max_recvs = num_procs; } if (max_sends) all_send_procs = hypre_CTAlloc(HYPRE_Int, max_sends, HYPRE_MEMORY_HOST); if (max_recvs) all_recv_procs = hypre_CTAlloc(HYPRE_Int, max_recvs, HYPRE_MEMORY_HOST); cnt_send = 0; cnt_recv = 0; if (max_sends || max_recvs) { if (max_sends < num_procs && max_recvs < num_procs) { for (i=addlvl; i < add_end; i++) { A_tmp = A_array[i]; comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp); if (comm_pkg) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); recv_procs = 
hypre_ParCSRCommPkgRecvProcs(comm_pkg); for (j = 0; j < num_sends; j++) all_send_procs[cnt_send++] = send_procs[j]; for (j = 0; j < num_recvs; j++) all_recv_procs[cnt_recv++] = recv_procs[j]; } } if (max_sends) { hypre_qsort0(all_send_procs, 0, max_sends-1); num_sends_L = 1; this_proc = all_send_procs[0]; for (i=1; i < max_sends; i++) { if (all_send_procs[i] > this_proc) { this_proc = all_send_procs[i]; all_send_procs[num_sends_L++] = this_proc; } } L_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L, HYPRE_MEMORY_HOST); for (j=0; j < num_sends_L; j++) L_send_procs[j] = all_send_procs[j]; hypre_TFree(all_send_procs, HYPRE_MEMORY_HOST); } if (max_recvs) { hypre_qsort0(all_recv_procs, 0, max_recvs-1); num_recvs_L = 1; this_proc = all_recv_procs[0]; for (i=1; i < max_recvs; i++) { if (all_recv_procs[i] > this_proc) { this_proc = all_recv_procs[i]; all_recv_procs[num_recvs_L++] = this_proc; } } L_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L, HYPRE_MEMORY_HOST); for (j=0; j < num_recvs_L; j++) L_recv_procs[j] = all_recv_procs[j]; hypre_TFree(all_recv_procs, HYPRE_MEMORY_HOST); } L_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs_L+1, HYPRE_MEMORY_HOST); L_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends_L+1, HYPRE_MEMORY_HOST); for (i=addlvl; i < add_end; i++) { A_tmp = A_array[i]; comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp); if (comm_pkg) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); } else { num_sends = 0; num_recvs = 0; } for (k = 0; k < num_sends; k++) { this_proc = hypre_BinarySearch(L_send_procs,send_procs[k],num_sends_L); L_send_ptr[this_proc+1] += send_map_starts[k+1]-send_map_starts[k]; } for (k = 0; k < num_recvs; k++) { this_proc = hypre_BinarySearch(L_recv_procs,recv_procs[k],num_recvs_L); L_recv_ptr[this_proc+1] += recv_vec_starts[k+1]-recv_vec_starts[k]; } } L_recv_ptr[0] = 0; for (i=1; i < num_recvs_L; i++) L_recv_ptr[i+1] += L_recv_ptr[i]; L_send_ptr[0] = 0; for (i=1; i < num_sends_L; i++) L_send_ptr[i+1] += L_send_ptr[i]; } else { num_recvs_L = 0; num_sends_L = 0; for (i=addlvl; i < add_end; i++) { A_tmp = A_array[i]; comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp); if (comm_pkg) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); for (j = 0; j < num_sends; j++) { this_proc = send_procs[j]; if (all_send_procs[this_proc] == 0) num_sends_L++; all_send_procs[this_proc] += send_map_starts[j+1]-send_map_starts[j]; } for (j = 0; j < num_recvs; j++) { this_proc = recv_procs[j]; if (all_recv_procs[this_proc] == 0) num_recvs_L++; all_recv_procs[this_proc] += recv_vec_starts[j+1]-recv_vec_starts[j]; } } } if (max_sends) { L_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L, HYPRE_MEMORY_HOST); L_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends_L+1, HYPRE_MEMORY_HOST); num_sends_L = 0; for (j=0; j < num_procs; j++) { this_proc = all_send_procs[j]; if (this_proc) { L_send_procs[num_sends_L++] = j; L_send_ptr[num_sends_L] = this_proc + L_send_ptr[num_sends_L-1]; } } } if (max_recvs) { L_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L, 
HYPRE_MEMORY_HOST); L_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs_L+1, HYPRE_MEMORY_HOST); num_recvs_L = 0; for (j=0; j < num_procs; j++) { this_proc = all_recv_procs[j]; if (this_proc) { L_recv_procs[num_recvs_L++] = j; L_recv_ptr[num_recvs_L] = this_proc + L_recv_ptr[num_recvs_L-1]; } } } } } if (max_sends) hypre_TFree(all_send_procs, HYPRE_MEMORY_HOST); if (max_recvs) hypre_TFree(all_recv_procs, HYPRE_MEMORY_HOST); L_diag = hypre_CSRMatrixCreate(num_rows_L, num_rows_L, num_nonzeros_diag); L_offd = hypre_CSRMatrixCreate(num_rows_L, num_cols_offd_L, num_nonzeros_offd); hypre_CSRMatrixInitialize(L_diag); hypre_CSRMatrixInitialize(L_offd); if (num_nonzeros_diag) { L_diag_data = hypre_CSRMatrixData(L_diag); L_diag_j = hypre_CSRMatrixJ(L_diag); } L_diag_i = hypre_CSRMatrixI(L_diag); if (num_nonzeros_offd) { L_offd_data = hypre_CSRMatrixData(L_offd); L_offd_j = hypre_CSRMatrixJ(L_offd); } L_offd_i = hypre_CSRMatrixI(L_offd); if (ns > 1) { Atilde_diag = hypre_CSRMatrixCreate(num_rows_L, num_rows_L, num_nonzeros_diag); Atilde_offd = hypre_CSRMatrixCreate(num_rows_L, num_cols_offd_L, num_nonzeros_offd); hypre_CSRMatrixInitialize(Atilde_diag); hypre_CSRMatrixInitialize(Atilde_offd); if (num_nonzeros_diag) { Atilde_diag_data = hypre_CSRMatrixData(Atilde_diag); Atilde_diag_j = hypre_CSRMatrixJ(Atilde_diag); } Atilde_diag_i = hypre_CSRMatrixI(Atilde_diag); if (num_nonzeros_offd) { Atilde_offd_data = hypre_CSRMatrixData(Atilde_offd); Atilde_offd_j = hypre_CSRMatrixJ(Atilde_offd); } Atilde_offd_i = hypre_CSRMatrixI(Atilde_offd); } if (num_rows_L) D_data = hypre_CTAlloc(HYPRE_Real, num_rows_L, HYPRE_MEMORY_HOST); if (send_data_L) { L_send_map_elmts = hypre_CTAlloc(HYPRE_Int, send_data_L, HYPRE_MEMORY_HOST); buf_data = hypre_CTAlloc(HYPRE_Real, send_data_L, HYPRE_MEMORY_HOST); } if (num_cols_offd_L) { D_data_offd = hypre_CTAlloc(HYPRE_Real, num_cols_offd_L, HYPRE_MEMORY_HOST); /*L_col_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_L);*/ remap = hypre_CTAlloc(HYPRE_Int, num_cols_offd_L, HYPRE_MEMORY_HOST); } Rtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST); Rtilde_local = hypre_SeqVectorCreate(num_rows_L); hypre_SeqVectorInitialize(Rtilde_local); hypre_ParVectorLocalVector(Rtilde) = Rtilde_local; hypre_ParVectorOwnsData(Rtilde) = 1; Xtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST); Xtilde_local = hypre_SeqVectorCreate(num_rows_L); hypre_SeqVectorInitialize(Xtilde_local); hypre_ParVectorLocalVector(Xtilde) = Xtilde_local; hypre_ParVectorOwnsData(Xtilde) = 1; x_data = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde)); r_data = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde)); cnt = 0; cnt_level = 0; cnt_diag = 0; cnt_offd = 0; cnt_row = 1; L_diag_i[0] = 0; L_offd_i[0] = 0; if (ns > 1) { A_cnt_diag = 0; A_cnt_offd = 0; Atilde_diag_i[0] = 0; Atilde_offd_i[0] = 0; } for (level=addlvl; level < add_end; level++) { row_start = level_start[cnt_level]; if (level != 0) { tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(F_array[level])); if (tmp_data) { hypre_TFree(tmp_data, hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(F_array[level]))); } hypre_VectorData(hypre_ParVectorLocalVector(F_array[level])) = &r_data[row_start]; hypre_VectorOwnsData(hypre_ParVectorLocalVector(F_array[level])) = 0; tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[level])); if (tmp_data) { hypre_TFree(tmp_data, hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(U_array[level]))); } hypre_VectorData(hypre_ParVectorLocalVector(U_array[level])) = &x_data[row_start]; 
hypre_VectorOwnsData(hypre_ParVectorLocalVector(U_array[level])) = 0; } cnt_level++; start_diag = L_diag_i[cnt_row-1]; start_offd = L_offd_i[cnt_row-1]; A_tmp = A_array[level]; A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp); A_tmp_offd = hypre_ParCSRMatrixOffd(A_tmp); comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp); A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag); A_tmp_offd_i = hypre_CSRMatrixI(A_tmp_offd); A_tmp_diag_j = hypre_CSRMatrixJ(A_tmp_diag); A_tmp_offd_j = hypre_CSRMatrixJ(A_tmp_offd); A_tmp_diag_data = hypre_CSRMatrixData(A_tmp_diag); A_tmp_offd_data = hypre_CSRMatrixData(A_tmp_offd); num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag); if (comm_pkg) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); } else { num_sends = 0; num_recvs = 0; } /* Compute new combined communication package */ for (i=0; i < num_sends; i++) { this_proc = hypre_BinarySearch(L_send_procs,send_procs[i],num_sends_L); indx = L_send_ptr[this_proc]; for (j=send_map_starts[i]; j < send_map_starts[i+1]; j++) { L_send_map_elmts[indx++] = row_start + send_map_elmts[j]; } L_send_ptr[this_proc] = indx; } cnt_map = 0; for (i = 0; i < num_recvs; i++) { this_proc = hypre_BinarySearch(L_recv_procs,recv_procs[i],num_recvs_L); indx = L_recv_ptr[this_proc]; for (j=recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++) { remap[cnt_map++] = indx++; } L_recv_ptr[this_proc] = indx; } /* Compute Lambda */ if (add_rlx == 0) { /*HYPRE_Real rlx_wt = relax_weight[level];*/ #ifdef HYPRE_USING_OPENMP #pragma omp for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_rows_tmp; i++) { D_data[i] = add_rlx_wt/A_tmp_diag_data[A_tmp_diag_i[i]]; L_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1]; L_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1]; } if (ns > 1) for (i=0; i < num_rows_tmp; i++) { Atilde_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1]; Atilde_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1]; } } else { l1_norms = l1_norms_ptr[level]; #ifdef HYPRE_USING_OPENMP #pragma omp for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_rows_tmp; i++) { D_data[i] = 1.0/l1_norms[i]; L_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1]; L_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1]; } if (ns > 1) for (i=0; i < num_rows_tmp; i++) { Atilde_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1]; Atilde_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1]; } } if (num_procs > 1) { index = 0; for (i=0; i < num_sends; i++) { start = send_map_starts[i]; for (j=start; j < send_map_starts[i+1]; j++) buf_data[index++] = D_data[send_map_elmts[j]]; } comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, buf_data, D_data_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } for (i = 0; i < num_rows_tmp; i++) { j_indx = A_tmp_diag_i[i]; if (ns > 1) { Atilde_diag_data[A_cnt_diag] = A_tmp_diag_data[j_indx]; Atilde_diag_j[A_cnt_diag++] = i+row_start; } L_diag_data[cnt_diag] = (2.0 - A_tmp_diag_data[j_indx]*D_data[i])*D_data[i]; L_diag_j[cnt_diag++] = i+row_start; for (j=A_tmp_diag_i[i]+1; j < A_tmp_diag_i[i+1]; j++) { j_indx = A_tmp_diag_j[j]; L_diag_data[cnt_diag] = (- A_tmp_diag_data[j]*D_data[j_indx])*D_data[i]; L_diag_j[cnt_diag++] = j_indx+row_start; } for (j=A_tmp_offd_i[i]; j < A_tmp_offd_i[i+1]; j++) { 
j_indx = A_tmp_offd_j[j]; L_offd_data[cnt_offd] = (- A_tmp_offd_data[j]*D_data_offd[j_indx])*D_data[i]; L_offd_j[cnt_offd++] = remap[j_indx]; } if (ns > 1) { for (j=A_tmp_diag_i[i]+1; j < A_tmp_diag_i[i+1]; j++) { j_indx = A_tmp_diag_j[j]; Atilde_diag_data[A_cnt_diag] = A_tmp_diag_data[j]; Atilde_diag_j[A_cnt_diag++] = j_indx+row_start; } for (j=A_tmp_offd_i[i]; j < A_tmp_offd_i[i+1]; j++) { j_indx = A_tmp_offd_j[j]; Atilde_offd_data[A_cnt_offd] = A_tmp_offd_data[j]; Atilde_offd_j[A_cnt_offd++] = remap[j_indx]; } } } cnt_row += num_rows_tmp; } if (L_send_ptr) { for (i=num_sends_L-1; i > 0; i--) L_send_ptr[i] = L_send_ptr[i-1]; L_send_ptr[0] = 0; } else L_send_ptr = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); if (L_recv_ptr) { for (i=num_recvs_L-1; i > 0; i--) L_recv_ptr[i] = L_recv_ptr[i-1]; L_recv_ptr[0] = 0; } else L_recv_ptr = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); L_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgNumRecvs(L_comm_pkg) = num_recvs_L; hypre_ParCSRCommPkgNumSends(L_comm_pkg) = num_sends_L; hypre_ParCSRCommPkgRecvProcs(L_comm_pkg) = L_recv_procs; hypre_ParCSRCommPkgSendProcs(L_comm_pkg) = L_send_procs; hypre_ParCSRCommPkgRecvVecStarts(L_comm_pkg) = L_recv_ptr; hypre_ParCSRCommPkgSendMapStarts(L_comm_pkg) = L_send_ptr; hypre_ParCSRCommPkgSendMapElmts(L_comm_pkg) = L_send_map_elmts; hypre_ParCSRCommPkgComm(L_comm_pkg) = comm; Lambda = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDiag(Lambda) = L_diag; hypre_ParCSRMatrixOffd(Lambda) = L_offd; hypre_ParCSRMatrixCommPkg(Lambda) = L_comm_pkg; hypre_ParCSRMatrixComm(Lambda) = comm; hypre_ParCSRMatrixOwnsData(Lambda) = 1; if (ns > 1) { /*hypre_ParCSRCommPkg *A_comm_pkg = NULL; HYPRE_Int *A_recv_ptr = NULL; HYPRE_Int *A_send_ptr = NULL; HYPRE_Int *A_recv_procs = NULL; HYPRE_Int *A_send_procs = NULL; HYPRE_Int *A_send_map_elmts = NULL; A_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); A_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST); A_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST); A_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L, HYPRE_MEMORY_HOST); A_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L, HYPRE_MEMORY_HOST); A_send_map_elmts = hypre_CTAlloc(HYPRE_Int, L_send_ptr[num_sends_L], HYPRE_MEMORY_HOST); for (i=0; i<num_recvs_L+1; i++) A_recv_ptr[i] = L_recv_ptr[i]; for (i=0; i<num_sends_L+1; i++) A_send_ptr[i] = L_send_ptr[i]; for (i=0; i<num_recvs_L; i++) A_recv_procs[i] = L_recv_procs[i]; for (i=0; i<num_sends_L; i++) A_send_procs[i] = L_send_procs[i]; for (i=0; i < L_send_ptr[num_sends_L]; i++) A_send_map_elmts[i] = L_send_map_elmts[i]; hypre_ParCSRCommPkgNumRecvs(A_comm_pkg) = num_recvs_L; hypre_ParCSRCommPkgNumSends(A_comm_pkg) = num_sends_L; hypre_ParCSRCommPkgRecvProcs(A_comm_pkg) = A_recv_procs; hypre_ParCSRCommPkgSendProcs(A_comm_pkg) = A_send_procs; hypre_ParCSRCommPkgRecvVecStarts(A_comm_pkg) = A_recv_ptr; hypre_ParCSRCommPkgSendMapStarts(A_comm_pkg) = A_send_ptr; hypre_ParCSRCommPkgSendMapElmts(A_comm_pkg) = A_send_map_elmts; hypre_ParCSRCommPkgComm(A_comm_pkg) = comm; */ Atilde = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDiag(Atilde) = Atilde_diag; hypre_ParCSRMatrixOffd(Atilde) = Atilde_offd; hypre_ParCSRMatrixCommPkg(Atilde) = L_comm_pkg; hypre_ParCSRMatrixComm(Atilde) = comm; hypre_ParCSRMatrixOwnsData(Atilde) = 1; hypre_ParAMGDataAtilde(amg_data) = Atilde; } hypre_ParAMGDataLambda(amg_data) = Lambda; 
hypre_ParAMGDataRtilde(amg_data) = Rtilde; hypre_ParAMGDataXtilde(amg_data) = Xtilde; hypre_TFree(D_data_offd, HYPRE_MEMORY_HOST); hypre_TFree(D_data, HYPRE_MEMORY_HOST); if (num_procs > 1) hypre_TFree(buf_data, HYPRE_MEMORY_HOST); hypre_TFree(remap, HYPRE_MEMORY_HOST); hypre_TFree(buf_data, HYPRE_MEMORY_HOST); hypre_TFree(level_start, HYPRE_MEMORY_HOST); return Solve_err_flag; } HYPRE_Int hypre_CreateDinv(void *amg_vdata) { hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata; /* Data Structure variables */ hypre_ParCSRMatrix **A_array; hypre_ParVector **F_array; hypre_ParVector **U_array; hypre_ParCSRMatrix *A_tmp; hypre_CSRMatrix *A_tmp_diag; hypre_ParVector *Xtilde; hypre_ParVector *Rtilde; hypre_Vector *Xtilde_local; hypre_Vector *Rtilde_local; HYPRE_Real *x_data; HYPRE_Real *r_data; HYPRE_Real *tmp_data; HYPRE_Real *D_inv = NULL; /*HYPRE_Real *relax_weight = NULL; HYPRE_Real relax_type;*/ HYPRE_Int addlvl; HYPRE_Int num_levels; HYPRE_Int num_rows_L; HYPRE_Int num_rows_tmp; HYPRE_Int level, i; HYPRE_Int add_rlx; HYPRE_Real add_rlx_wt; HYPRE_Int add_last_lvl, add_end; /* Local variables */ HYPRE_Int Solve_err_flag = 0; HYPRE_Real **l1_norms_ptr = NULL; HYPRE_Real *l1_norms; HYPRE_Int l1_start; /* Acquire data and allocate storage */ A_array = hypre_ParAMGDataAArray(amg_data); F_array = hypre_ParAMGDataFArray(amg_data); U_array = hypre_ParAMGDataUArray(amg_data); addlvl = hypre_ParAMGDataSimple(amg_data); num_levels = hypre_ParAMGDataNumLevels(amg_data); add_rlx_wt = hypre_ParAMGDataAddRelaxWt(amg_data); add_rlx = hypre_ParAMGDataAddRelaxType(amg_data); add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data); /*relax_weight = hypre_ParAMGDataRelaxWeight(amg_data); relax_type = hypre_ParAMGDataGridRelaxType(amg_data)[1];*/ l1_norms_ptr = hypre_ParAMGDataL1Norms(amg_data); /* smooth_option = hypre_ParAMGDataSmoothOption(amg_data); */ if (add_last_lvl == -1 ) add_end = num_levels; else add_end = add_last_lvl; num_rows_L = 0; for (i=addlvl; i < add_end; i++) { A_tmp = A_array[i]; A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp); num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag); num_rows_L += num_rows_tmp; } Rtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST); Rtilde_local = hypre_SeqVectorCreate(num_rows_L); hypre_SeqVectorInitialize(Rtilde_local); hypre_ParVectorLocalVector(Rtilde) = Rtilde_local; hypre_ParVectorOwnsData(Rtilde) = 1; Xtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST); Xtilde_local = hypre_SeqVectorCreate(num_rows_L); hypre_SeqVectorInitialize(Xtilde_local); hypre_ParVectorLocalVector(Xtilde) = Xtilde_local; hypre_ParVectorOwnsData(Xtilde) = 1; x_data = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde)); r_data = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde)); D_inv = hypre_CTAlloc(HYPRE_Real, num_rows_L, HYPRE_MEMORY_HOST); l1_start = 0; for (level=addlvl; level < add_end; level++) { if (level != 0) { tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(F_array[level])); if (tmp_data) { hypre_TFree(tmp_data, hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(F_array[level]))); } hypre_VectorData(hypre_ParVectorLocalVector(F_array[level])) = &r_data[l1_start]; hypre_VectorOwnsData(hypre_ParVectorLocalVector(F_array[level])) = 0; tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[level])); if (tmp_data) { hypre_TFree(tmp_data, hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(U_array[level]))); } hypre_VectorData(hypre_ParVectorLocalVector(U_array[level])) = &x_data[l1_start]; 
hypre_VectorOwnsData(hypre_ParVectorLocalVector(U_array[level])) = 0; } A_tmp = A_array[level]; A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp); num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag); if (add_rlx == 0) { /*HYPRE_Real rlx_wt = relax_weight[level];*/ HYPRE_Int *A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag); HYPRE_Real *A_tmp_diag_data = hypre_CSRMatrixData(A_tmp_diag); #ifdef HYPRE_USING_OPENMP #pragma omp for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_rows_tmp; i++) D_inv[l1_start+i] = add_rlx_wt/A_tmp_diag_data[A_tmp_diag_i[i]]; } else { l1_norms = l1_norms_ptr[level]; #ifdef HYPRE_USING_OPENMP #pragma omp for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_rows_tmp; i++) D_inv[l1_start+i] = 1.0/l1_norms[i]; } l1_start += num_rows_tmp; } hypre_ParAMGDataDinv(amg_data) = D_inv; hypre_ParAMGDataRtilde(amg_data) = Rtilde; hypre_ParAMGDataXtilde(amg_data) = Xtilde; return Solve_err_flag; }
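A minimal standalone sketch (not part of the hypre source above; function and parameter names are mine) of the scaling that hypre_CreateDinv fills in: each entry of D_inv is either add_rlx_wt / a_ii (weighted Jacobi) or 1 / l1_norm_i (l1-Jacobi). It assumes, as the loops above do, that the diagonal is the first stored entry of each CSR row.

/* Sketch only: mirrors the two relaxation choices used to fill D_inv. */
void fill_dinv_sketch(int nrows, const int *row_ptr, const double *val,
                      const double *l1_norms, double relax_wt,
                      int use_l1, double *d_inv)
{
  int i;
#ifdef _OPENMP
#pragma omp parallel for private(i) schedule(static)
#endif
  for (i = 0; i < nrows; i++) {
    if (use_l1)
      d_inv[i] = 1.0 / l1_norms[i];          /* l1-Jacobi scaling */
    else
      d_inv[i] = relax_wt / val[row_ptr[i]]; /* weighted Jacobi: w / a_ii */
  }
}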
udata.c
#include "../umesh.h" #include "../shared.h" // The indirections are messy but quite compact. #define XZPLANE_FACE_INDEX(ii, jj, kk) \ (((ii) * (3 * nx * ny + nx + ny)) + (nx * ny) + ((jj) * (2 * nx + 1)) + \ (((jj) < ny) ? (2 * (kk) + 1) : (kk))) #define XYPLANE_FACE_INDEX(ii, jj, kk) \ (((ii) * (3 * nx * ny + nx + ny)) + ((jj)*nx) + (kk)) #define YZPLANE_FACE_INDEX(ii, jj, kk) \ (((ii) * (3 * nx * ny + nx + ny)) + (nx * ny) + ((jj) * (2 * nx + 1)) + \ (2 * (kk))) // Initialises the offsets between faces and nodes void init_faces_to_nodes_offsets_3d(UnstructuredMesh* umesh) { #pragma omp parallel for for (int ff = 0; ff < umesh->nfaces + 1; ++ff) { umesh->faces_to_nodes_offsets[(ff)] = ff * NNODES_BY_FACE; if (ff < umesh->nfaces) { umesh->faces_cclockwise_cell[(ff)] = -1; } } } // Initialises the offsets between cells and faces void init_cells_to_faces_offsets_3d(UnstructuredMesh* umesh) { #pragma omp parallel for for (int cc = 0; cc < umesh->ncells + 1; ++cc) { umesh->cells_to_faces_offsets[(cc)] = cc * NFACES_BY_CELL; } } // Initialises the offsets between nodes and faces void init_nodes_to_faces_offsets_3d(UnstructuredMesh* umesh) { #pragma omp parallel for for (int nn = 0; nn < umesh->nnodes + 1; ++nn) { umesh->nodes_to_faces_offsets[(nn)] = nn * NFACES_BY_NODE; } } // Initialises the connectivity between faces and cells void init_faces_to_cells_3d(const int nx, const int ny, const int nz, UnstructuredMesh* umesh) { #pragma omp parallel for for (int ii = 0; ii < nz + 1; ++ii) { // All front oriented faces for (int jj = 0; jj < ny; ++jj) { for (int kk = 0; kk < nx; ++kk) { const int face_index = XYPLANE_FACE_INDEX(ii, jj, kk); umesh->faces_to_cells0[(face_index)] = (ii < nz) ? (ii * nx * ny) + (jj * nx) + (kk) : -1; umesh->faces_to_cells1[(face_index)] = (ii > 0) ? ((ii - 1) * nx * ny) + (jj * nx) + (kk) : -1; } } if (ii < nz) { // Side oriented faces for (int jj = 0; jj < ny; ++jj) { for (int kk = 0; kk < nx + 1; ++kk) { const int face_index = YZPLANE_FACE_INDEX(ii, jj, kk); umesh->faces_to_cells0[(face_index)] = (kk < nx) ? (ii * nx * ny) + (jj * nx) + (kk) : -1; umesh->faces_to_cells1[(face_index)] = (kk > 0) ? (ii * nx * ny) + (jj * nx) + (kk - 1) : -1; } } } if (ii < nz) { // Bottom oriented faces for (int jj = 0; jj < ny + 1; ++jj) { for (int kk = 0; kk < nx; ++kk) { const int face_index = XZPLANE_FACE_INDEX(ii, jj, kk); umesh->faces_to_cells0[(face_index)] = (jj < ny) ? (ii * nx * ny) + (jj * nx) + (kk) : -1; umesh->faces_to_cells1[(face_index)] = (jj > 0) ? (ii * nx * ny) + ((jj - 1) * nx) + (kk) : -1; } } } } } // Initialises the connectivity between nodes and faces void init_nodes_to_faces_3d(const int nx, const int ny, const int nz, UnstructuredMesh* umesh) { // Determine the connectivity of nodes to faces #pragma omp parallel for for (int ii = 0; ii < (nz + 1); ++ii) { for (int jj = 0; jj < (ny + 1); ++jj) { for (int kk = 0; kk < (nx + 1); ++kk) { const int node_index = (ii * (nx + 1) * (ny + 1)) + (jj * (nx + 1)) + (kk); const int node_to_faces_off = umesh->nodes_to_faces_offsets[(node_index)]; umesh->nodes_to_faces[(node_to_faces_off + 0)] = (ii < nz && kk < nx) ? XZPLANE_FACE_INDEX(ii, jj, kk) : -1; umesh->nodes_to_faces[(node_to_faces_off + 1)] = (jj < ny && kk < nx) ? XYPLANE_FACE_INDEX(ii, jj, kk) : -1; umesh->nodes_to_faces[(node_to_faces_off + 2)] = (ii < nz && jj < ny) ? YZPLANE_FACE_INDEX(ii, jj, kk) : -1; umesh->nodes_to_faces[(node_to_faces_off + 3)] = (ii < nz && kk > 0) ? 
XZPLANE_FACE_INDEX(ii, jj, kk - 1) : -1; umesh->nodes_to_faces[(node_to_faces_off + 4)] = (jj < ny && kk > 0) ? XYPLANE_FACE_INDEX(ii, jj, kk - 1) : -1; umesh->nodes_to_faces[(node_to_faces_off + 5)] = (ii > 0 && kk < nx) ? XZPLANE_FACE_INDEX(ii - 1, jj, kk) : -1; umesh->nodes_to_faces[(node_to_faces_off + 6)] = (ii > 0 && jj < ny) ? YZPLANE_FACE_INDEX(ii - 1, jj, kk) : -1; umesh->nodes_to_faces[(node_to_faces_off + 7)] = (ii > 0 && kk > 0) ? XZPLANE_FACE_INDEX(ii - 1, jj, kk - 1) : -1; umesh->nodes_to_faces[(node_to_faces_off + 8)] = (jj > 0 && kk < nx) ? XYPLANE_FACE_INDEX(ii, jj - 1, kk) : -1; umesh->nodes_to_faces[(node_to_faces_off + 9)] = (jj > 0 && kk > 0) ? XYPLANE_FACE_INDEX(ii, jj - 1, kk - 1) : -1; umesh->nodes_to_faces[(node_to_faces_off + 10)] = (ii > 0 && jj > 0) ? YZPLANE_FACE_INDEX(ii - 1, jj - 1, kk) : -1; umesh->nodes_to_faces[(node_to_faces_off + 11)] = (ii < nz && jj > 0) ? YZPLANE_FACE_INDEX(ii, jj - 1, kk) : -1; } } } } // Initialises the cells to nodes connectivity void init_cells_to_nodes_3d(const int nx, const int ny, const int nz, UnstructuredMesh* umesh) { #pragma omp parallel for for (int cc = 0; cc < umesh->ncells + 1; ++cc) { umesh->cells_to_nodes_offsets[(cc)] = cc * NNODES_BY_CELL; } #pragma omp parallel for for (int ii = 0; ii < nz; ++ii) { for (int jj = 0; jj < ny; ++jj) { for (int kk = 0; kk < nx; ++kk) { const int index = (ii * nx * ny) + (jj * nx) + (kk); // Simple closed form calculation for the nodes surrounding a cell umesh->cells_to_nodes[(index * NNODES_BY_CELL) + 0] = (ii * (nx + 1) * (ny + 1)) + (jj * (nx + 1)) + (kk); umesh->cells_to_nodes[(index * NNODES_BY_CELL) + 1] = (ii * (nx + 1) * (ny + 1)) + (jj * (nx + 1)) + (kk + 1); umesh->cells_to_nodes[(index * NNODES_BY_CELL) + 2] = (ii * (nx + 1) * (ny + 1)) + ((jj + 1) * (nx + 1)) + (kk + 1); umesh->cells_to_nodes[(index * NNODES_BY_CELL) + 3] = (ii * (nx + 1) * (ny + 1)) + ((jj + 1) * (nx + 1)) + (kk); umesh->cells_to_nodes[(index * NNODES_BY_CELL) + 4] = ((ii + 1) * (nx + 1) * (ny + 1)) + (jj * (nx + 1)) + (kk); umesh->cells_to_nodes[(index * NNODES_BY_CELL) + 5] = ((ii + 1) * (nx + 1) * (ny + 1)) + (jj * (nx + 1)) + (kk + 1); umesh->cells_to_nodes[(index * NNODES_BY_CELL) + 6] = ((ii + 1) * (nx + 1) * (ny + 1)) + ((jj + 1) * (nx + 1)) + (kk + 1); umesh->cells_to_nodes[(index * NNODES_BY_CELL) + 7] = ((ii + 1) * (nx + 1) * (ny + 1)) + ((jj + 1) * (nx + 1)) + (kk); } } } } // Initialises the connectivity between faces and nodes void init_faces_to_nodes_3d(const int nx, const int ny, const int nz, UnstructuredMesh* umesh) { // Connectivity of faces to nodes, the nodes are stored in a counter-clockwise // ordering around the face #pragma omp parallel for for (int ii = 0; ii < nz + 1; ++ii) { // Add the front faces for (int jj = 0; jj < ny; ++jj) { for (int kk = 0; kk < nx; ++kk) { const int face_index = XYPLANE_FACE_INDEX(ii, jj, kk); const int face_to_node_off = umesh->faces_to_nodes_offsets[(face_index)]; // On the front face umesh->faces_to_nodes[(face_to_node_off + 0)] = (ii * (nx + 1) * (ny + 1)) + (jj * (nx + 1)) + (kk); umesh->faces_to_nodes[(face_to_node_off + 1)] = (ii * (nx + 1) * (ny + 1)) + (jj * (nx + 1)) + (kk + 1); umesh->faces_to_nodes[(face_to_node_off + 2)] = (ii * (nx + 1) * (ny + 1)) + ((jj + 1) * (nx + 1)) + (kk + 1); umesh->faces_to_nodes[(face_to_node_off + 3)] = (ii * (nx + 1) * (ny + 1)) + ((jj + 1) * (nx + 1)) + (kk); if (ii < nz + 1) { umesh->faces_cclockwise_cell[(face_index)] = (ii * nx * ny) + (jj * nx) + (kk); } } } if (ii < nz) { for (int jj = 0; jj < ny + 1; ++jj) 
{ for (int kk = 0; kk < nx + 1; ++kk) { if (jj < ny) { // On the left face const int face_index = YZPLANE_FACE_INDEX(ii, jj, kk); const int face_to_node_off = umesh->faces_to_nodes_offsets[(face_index)]; umesh->faces_to_nodes[(face_to_node_off + 0)] = (ii * (nx + 1) * (ny + 1)) + (jj * (nx + 1)) + (kk); umesh->faces_to_nodes[(face_to_node_off + 1)] = (ii * (nx + 1) * (ny + 1)) + ((jj + 1) * (nx + 1)) + (kk); umesh->faces_to_nodes[(face_to_node_off + 2)] = ((ii + 1) * (nx + 1) * (ny + 1)) + ((jj + 1) * (nx + 1)) + (kk); umesh->faces_to_nodes[(face_to_node_off + 3)] = ((ii + 1) * (nx + 1) * (ny + 1)) + (jj * (nx + 1)) + (kk); if (kk < nx + 1) { umesh->faces_cclockwise_cell[(face_index)] = (ii * nx * ny) + (jj * nx) + (kk); } } if (kk < nx) { // On the bottom face const int face_index = XZPLANE_FACE_INDEX(ii, jj, kk); const int face_to_node_off = umesh->faces_to_nodes_offsets[(face_index)]; umesh->faces_to_nodes[(face_to_node_off + 0)] = (ii * (nx + 1) * (ny + 1)) + (jj * (nx + 1)) + (kk); umesh->faces_to_nodes[(face_to_node_off + 1)] = ((ii + 1) * (nx + 1) * (ny + 1)) + (jj * (nx + 1)) + (kk); umesh->faces_to_nodes[(face_to_node_off + 2)] = ((ii + 1) * (nx + 1) * (ny + 1)) + (jj * (nx + 1)) + (kk + 1); umesh->faces_to_nodes[(face_to_node_off + 3)] = (ii * (nx + 1) * (ny + 1)) + (jj * (nx + 1)) + (kk + 1); if (jj < ny + 1) { umesh->faces_cclockwise_cell[(face_index)] = (ii * nx * ny) + (jj * nx) + (kk); } } } } } } } // Initialises the connectivity between cells and faces void init_cells_to_faces_3d(const int nx, const int ny, const int nz, UnstructuredMesh* umesh) { #pragma omp parallel for for (int ii = 0; ii < nz; ++ii) { for (int jj = 0; jj < ny; ++jj) { for (int kk = 0; kk < nx; ++kk) { const int cell_index = (ii * nx * ny) + (jj * nx) + (kk); const int cell_to_faces_off = umesh->cells_to_faces_offsets[(cell_index)]; umesh->cells_to_faces[(cell_to_faces_off + 0)] = XYPLANE_FACE_INDEX(ii, jj, kk); umesh->cells_to_faces[(cell_to_faces_off + 1)] = YZPLANE_FACE_INDEX(ii, jj, kk); umesh->cells_to_faces[(cell_to_faces_off + 2)] = XZPLANE_FACE_INDEX(ii, jj, kk); umesh->cells_to_faces[(cell_to_faces_off + 3)] = YZPLANE_FACE_INDEX(ii, jj, kk + 1); umesh->cells_to_faces[(cell_to_faces_off + 4)] = XZPLANE_FACE_INDEX(ii, jj + 1, kk); umesh->cells_to_faces[(cell_to_faces_off + 5)] = XYPLANE_FACE_INDEX(ii + 1, jj, kk); } } } } // Initialises the list of nodes to nodes void init_nodes_to_nodes_3d(const int nx, const int ny, const int nz, UnstructuredMesh* umesh) { // Prefix sum to convert the counts to offsets #pragma omp parallel for for (int nn = 0; nn < umesh->nnodes + 1; ++nn) { umesh->nodes_to_nodes_offsets[(nn)] = nn * NNODES_BY_NODE; } // Initialise the offsets and list of nodes to cells, counter-clockwise order #pragma omp parallel for for (int ii = 0; ii < (nz + 1); ++ii) { for (int jj = 0; jj < (ny + 1); ++jj) { for (int kk = 0; kk < (nx + 1); ++kk) { const int node_index = (ii * (nx + 1) * (ny + 1)) + (jj * (nx + 1)) + (kk); int off = umesh->nodes_to_nodes_offsets[(node_index)]; // Initialise all to boundary for (int nn = 0; nn < NNODES_BY_NODE; ++nn) { umesh->nodes_to_nodes[(off + nn)] = -1; } if (kk < nx) { umesh->nodes_to_nodes[(off++)] = (ii * (nx + 1) * (ny + 1)) + (jj * (nx + 1)) + (kk + 1); } if (kk > 0) { umesh->nodes_to_nodes[(off++)] = (ii * (nx + 1) * (ny + 1)) + (jj * (nx + 1)) + (kk - 1); } if (jj < ny) { umesh->nodes_to_nodes[(off++)] = (ii * (nx + 1) * (ny + 1)) + ((jj + 1) * (nx + 1)) + (kk); } if (jj > 0) { umesh->nodes_to_nodes[(off++)] = (ii * (nx + 1) * (ny + 1)) + ((jj - 
1) * (nx + 1)) + (kk); } if (ii < nz) { umesh->nodes_to_nodes[(off++)] = ((ii + 1) * (nx + 1) * (ny + 1)) + (jj * (nx + 1)) + (kk); } if (ii > 0) { umesh->nodes_to_nodes[(off++)] = ((ii - 1) * (nx + 1) * (ny + 1)) + (jj * (nx + 1)) + (kk); } } } } } // Initialises the list of nodes to cells void init_nodes_to_cells_3d(const int nx, const int ny, const int nz, Mesh* mesh, UnstructuredMesh* umesh) { #pragma omp parallel for for (int ii = 0; ii < (nz + 1); ++ii) { for (int jj = 0; jj < (ny + 1); ++jj) { for (int kk = 0; kk < (nx + 1); ++kk) { const int node_index = (ii * (nx + 1) * (ny + 1)) + (jj * (nx + 1)) + (kk); umesh->nodes_z0[(node_index)] = mesh->edgez[(ii)]; umesh->nodes_y0[(node_index)] = mesh->edgey[(jj)]; umesh->nodes_x0[(node_index)] = mesh->edgex[(kk)]; } } } // Override the original initialisation of the nodes_to_cells layout #pragma omp parallel for for (int ii = 0; ii < (nz + 1); ++ii) { for (int jj = 0; jj < (ny + 1); ++jj) { for (int kk = 0; kk < (nx + 1); ++kk) { const int node_index = (ii * (nx + 1) * (ny + 1)) + (jj * (nx + 1)) + (kk); // Fill in all of the cells that surround a node umesh->nodes_to_cells[node_index * NCELLS_BY_NODE + 0] = (ii > 0 && jj > 0 && kk > 0) ? ((ii - 1) * nx * ny) + ((jj - 1) * nx) + (kk - 1) : -1; umesh->nodes_to_cells[node_index * NCELLS_BY_NODE + 1] = (ii > 0 && jj > 0 && kk < nx) ? ((ii - 1) * nx * ny) + ((jj - 1) * nx) + (kk) : -1; umesh->nodes_to_cells[node_index * NCELLS_BY_NODE + 2] = (ii > 0 && jj < ny && kk > 0) ? ((ii - 1) * nx * ny) + (jj * nx) + (kk - 1) : -1; umesh->nodes_to_cells[node_index * NCELLS_BY_NODE + 3] = (ii > 0 && jj < ny && kk < nx) ? ((ii - 1) * nx * ny) + (jj * nx) + (kk) : -1; umesh->nodes_to_cells[node_index * NCELLS_BY_NODE + 4] = (ii < nz && jj > 0 && kk > 0) ? (ii * nx * ny) + ((jj - 1) * nx) + (kk - 1) : -1; umesh->nodes_to_cells[node_index * NCELLS_BY_NODE + 5] = (ii < nz && jj > 0 && kk < nx) ? (ii * nx * ny) + ((jj - 1) * nx) + (kk) : -1; umesh->nodes_to_cells[node_index * NCELLS_BY_NODE + 6] = (ii < nz && jj < ny && kk > 0) ? (ii * nx * ny) + (jj * nx) + (kk - 1) : -1; umesh->nodes_to_cells[node_index * NCELLS_BY_NODE + 7] = (ii < nz && jj < ny && kk < nx) ? 
(ii * nx * ny) + (jj * nx) + (kk) : -1; } } } } // Initialises the boundary normals void init_boundary_normals_3d(const int nx, const int ny, const int nz, UnstructuredMesh* umesh) { int* boundary_index; int* boundary_type; double* boundary_normal_x; double* boundary_normal_y; double* boundary_normal_z; allocate_host_int_data(&boundary_index, umesh->nnodes); allocate_host_int_data(&boundary_type, umesh->nboundary_nodes); allocate_host_data(&boundary_normal_x, umesh->nboundary_nodes); allocate_host_data(&boundary_normal_y, umesh->nboundary_nodes); allocate_host_data(&boundary_normal_z, umesh->nboundary_nodes); // Determine all of the boundary edges int nboundary_nodes = 0; for (int ii = 0; ii < (nz + 1); ++ii) { for (int jj = 0; jj < (ny + 1); ++jj) { for (int kk = 0; kk < (nx + 1); ++kk) { const int boundary_count = ((ii == 0) + (ii == nz) + (jj == 0) + (jj == ny) + (kk == 0) + (kk == nx)); const int node_index = (ii * (nx + 1) * (ny + 1)) + (jj * (nx + 1)) + (kk); // Check if we are on the edge if (boundary_count > 0) { int index = nboundary_nodes++; boundary_index[(node_index)] = index; if (boundary_count == 3) { boundary_type[(index)] = IS_CORNER; } else if (boundary_count == 2) { boundary_type[(index)] = IS_EDGE; if (kk == 0) { if (jj == 0) { boundary_normal_z[(index)] = 1.0; } else if (jj == ny) { boundary_normal_z[(index)] = 1.0; } else if (ii == 0) { boundary_normal_y[(index)] = 1.0; } else if (ii == nz) { boundary_normal_y[(index)] = 1.0; } } else if (jj == 0) { if (kk == nx) { boundary_normal_z[(index)] = 1.0; } else if (ii == 0) { boundary_normal_x[(index)] = 1.0; } else if (ii == nz) { boundary_normal_x[(index)] = 1.0; } } else if (ii == 0) { if (kk == nx) { boundary_normal_y[(index)] = 1.0; } else if (jj == ny) { boundary_normal_x[(index)] = 1.0; } } else if (kk == nx) { if (ii == nz) { boundary_normal_y[(index)] = 1.0; } else if (jj == ny) { boundary_normal_z[(index)] = 1.0; } } else if (jj == ny) { if (ii == nz) { boundary_normal_x[(index)] = 1.0; } } } else if (boundary_count == 1) { boundary_type[(index)] = IS_BOUNDARY; // TODO: WE DON'T NEED ANYTHING SPECIAL HERE AS WE KNOW THE // NORMALS // FROM THE CONSTRUCTION OF THE MESH, ALTHOUGH WE WILL NEED A // SUFFICIENT METHOD WHEN WE START USING MORE COMPLEX MESHES boundary_normal_x[(index)] = (kk == 0) ? -1.0 : ((kk == nx) ? 1.0 : 0.0); boundary_normal_y[(index)] = (jj == 0) ? -1.0 : ((jj == ny) ? 1.0 : 0.0); boundary_normal_z[(index)] = (ii == 0) ? -1.0 : ((ii == nz) ? 1.0 : 0.0); } } else { boundary_index[(node_index)] = IS_INTERIOR; } } } } copy_buffer(umesh->nboundary_nodes, &boundary_normal_x, &umesh->boundary_normal_x, 1); copy_buffer(umesh->nboundary_nodes, &boundary_normal_y, &umesh->boundary_normal_y, 1); copy_buffer(umesh->nboundary_nodes, &boundary_normal_z, &umesh->boundary_normal_z, 1); copy_int_buffer(umesh->nboundary_nodes, &boundary_type, &umesh->boundary_type, 1); copy_int_buffer(umesh->nnodes, &boundary_index, &umesh->boundary_index, 1); deallocate_host_int_data(boundary_type); deallocate_host_int_data(boundary_index); deallocate_host_data(boundary_normal_x); deallocate_host_data(boundary_normal_y); deallocate_host_data(boundary_normal_z); }
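The *_FACE_INDEX macros above interleave the three face families of each ii-slab into one contiguous block of 3*nx*ny + nx + ny slots. The following standalone check (illustrative mesh sizes; not part of udata.c) repeats the macros and verifies that the numbering is dense and collision-free.

#include <stdio.h>
#include <stdlib.h>

#define XZPLANE_FACE_INDEX(ii, jj, kk) \
  (((ii) * (3 * nx * ny + nx + ny)) + (nx * ny) + ((jj) * (2 * nx + 1)) + \
   (((jj) < ny) ? (2 * (kk) + 1) : (kk)))
#define XYPLANE_FACE_INDEX(ii, jj, kk) \
  (((ii) * (3 * nx * ny + nx + ny)) + ((jj) * nx) + (kk))
#define YZPLANE_FACE_INDEX(ii, jj, kk) \
  (((ii) * (3 * nx * ny + nx + ny)) + (nx * ny) + ((jj) * (2 * nx + 1)) + \
   (2 * (kk)))

int main(void) {
  const int nx = 3, ny = 4, nz = 5; /* small illustrative mesh extents */
  /* Unique faces of an nx * ny * nz hexahedral mesh. */
  const int nfaces = 3 * nx * ny * nz + nx * ny + ny * nz + nx * nz;
  int *seen = calloc(nfaces, sizeof(int));
  int ii, jj, kk;

  /* XY-plane faces exist on nz+1 planes. */
  for (ii = 0; ii < nz + 1; ++ii)
    for (jj = 0; jj < ny; ++jj)
      for (kk = 0; kk < nx; ++kk)
        seen[XYPLANE_FACE_INDEX(ii, jj, kk)]++;

  /* YZ- and XZ-plane faces only exist for the nz cell slabs. */
  for (ii = 0; ii < nz; ++ii) {
    for (jj = 0; jj < ny; ++jj)
      for (kk = 0; kk < nx + 1; ++kk)
        seen[YZPLANE_FACE_INDEX(ii, jj, kk)]++;
    for (jj = 0; jj < ny + 1; ++jj)
      for (kk = 0; kk < nx; ++kk)
        seen[XZPLANE_FACE_INDEX(ii, jj, kk)]++;
  }

  int ok = 1;
  for (ii = 0; ii < nfaces; ++ii) ok &= (seen[ii] == 1);
  printf("face numbering dense and collision free: %s\n", ok ? "yes" : "no");
  free(seen);
  return 0;
}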
q3.c
#include <omp.h>
#include <stdio.h>

int main(int argc, char *argv[]) {
  int numThreads, tid;

  /* This creates a team of threads; each thread has its own copy of the variables */
#pragma omp parallel private(numThreads, tid)
  {
    tid = omp_get_thread_num();
    printf("Hello World from thread number %d\n", tid);

    /* The following is executed by the master thread only (tid == 0) */
    if (tid == 0) {
      numThreads = omp_get_num_threads();
      printf("Number of threads is %d\n", numThreads);
    }
  }
  return 0;
}
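A variant sketch (mine, not part of q3.c): using omp single instead of testing the thread id lets any one thread report the team size, with no private numThreads bookkeeping.

#include <omp.h>
#include <stdio.h>

int main(void) {
#pragma omp parallel
  {
    printf("Hello World from thread number %d\n", omp_get_thread_num());

    /* Exactly one thread of the team executes the 'single' block; it need
     * not be thread 0, so no tid test is required. */
#pragma omp single
    printf("Number of threads is %d\n", omp_get_num_threads());
  }
  return 0;
}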
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 4; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,12);t1++) { lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24)); ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(24*t2-Nz,4)),3*t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(12*t1+Ny+21,4)),floord(24*t2+Ny+20,4)),floord(24*t1-24*t2+Nz+Ny+19,4));t3++) { for (t4=max(max(max(0,ceild(3*t1-15,16)),ceild(24*t2-Nz-60,64)),ceild(4*t3-Ny-60,64));t4<=min(min(min(min(floord(4*t3+Nx,64),floord(Nt+Nx-4,64)),floord(12*t1+Nx+21,64)),floord(24*t2+Nx+20,64)),floord(24*t1-24*t2+Nz+Nx+19,64));t4++) { for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),4*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),4*t3+2),64*t4+62),24*t1-24*t2+Nz+21);t5++) { for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) { lbv=max(64*t4,t5+1); ubv=min(64*t4+63,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } } } } /* 
End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
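For readability, here is an untiled reference sweep (a sketch I added, not part of the benchmark) of the same order-1, 7-point, variable-coefficient update that the time-tiled CLooG nest above computes; the array shapes follow the driver's allocation of A[2][Nz][Ny][Nx] and coef[7][Nz][Ny][Nx].

/* Reference (untiled) sweep: Nt-1 time steps over the interior points. */
static void reference_sweep(int Nt, int Nz, int Ny, int Nx,
                            double ****A, double ****coef) {
  int t, i, j, k;
  for (t = 0; t < Nt - 1; t++) {
#pragma omp parallel for private(j, k)
    for (i = 1; i < Nz - 1; i++) {
      for (j = 1; j < Ny - 1; j++) {
        for (k = 1; k < Nx - 1; k++) {
          /* 7-point star with per-point coefficients, ping-pong in time */
          A[(t + 1) % 2][i][j][k] =
              coef[0][i][j][k] * A[t % 2][i][j][k] +
              coef[1][i][j][k] * A[t % 2][i - 1][j][k] +
              coef[2][i][j][k] * A[t % 2][i][j - 1][k] +
              coef[3][i][j][k] * A[t % 2][i][j][k - 1] +
              coef[4][i][j][k] * A[t % 2][i + 1][j][k] +
              coef[5][i][j][k] * A[t % 2][i][j + 1][k] +
              coef[6][i][j][k] * A[t % 2][i][j][k + 1];
        }
      }
    }
  }
}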
spmspv3.h
/****************************************************************************** * ** Copyright (c) 2016, Intel Corporation ** * ** All rights reserved. ** * ** ** * ** Redistribution and use in source and binary forms, with or without ** * ** modification, are permitted provided that the following conditions ** * ** are met: ** * ** 1. Redistributions of source code must retain the above copyright ** * ** notice, this list of conditions and the following disclaimer. ** * ** 2. Redistributions in binary form must reproduce the above copyright ** * ** notice, this list of conditions and the following disclaimer in the ** * ** documentation and/or other materials provided with the distribution. ** * ** 3. Neither the name of the copyright holder nor the names of its ** * ** contributors may be used to endorse or promote products derived ** * ** from this software without specific prior written permission. ** * ** ** * ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** * ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** * ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** * ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** * ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** * ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** * ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** * ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** * ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** * ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** * ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * ******************************************************************************/ /* Michael Anderson (Intel Corp.) 
* * ******************************************************************************/ #ifndef SRC_SINGLENODE_SPMSPV3_H_ #define SRC_SINGLENODE_SPMSPV3_H_ #include <xmmintrin.h> #include "GMDP/utils/bitvector.h" template <typename Ta, typename Tx, typename Tvp, typename Ty> void my_spmspv3(int* row_inds, int* col_ptrs, int* col_indices, Ta* vals, int num_partitions, int* row_pointers, int* col_starts, int* edge_pointers, Tx* xvalue, int * xbit_vector, Tvp * vpvalue, int * vpbit_vector, Ty * yvalue, int * ybit_vector, int m, int n, int* nnz, void (*op_mul)(const Ta&, const Tx&, const Tvp&, Ty*, void*), void (*op_add)(const Ty&, const Ty&, Ty*, void*), void* vsp) { #pragma omp parallel for schedule(dynamic, 1) for (int p = 0; p < num_partitions; p++) { // For each column const int* column_offset = col_indices + col_starts[p]; const int* partitioned_row_offset = row_inds + edge_pointers[p]; const Ta* partitioned_val_offset = vals + edge_pointers[p]; const int* col_ptrs_cur = col_ptrs + col_starts[p]; for (int j = 0; j < (col_starts[p + 1] - col_starts[p]) - 1; j++) { int col_index = col_indices[col_starts[p] + j]; if(get_bitvector(col_index, xbit_vector)) { //Tx Xval = xvalue[col_index]; _mm_prefetch((char*)(xvalue + column_offset[j + 4]), _MM_HINT_T0); int nz_idx = col_ptrs_cur[j]; for (; nz_idx < col_ptrs_cur[j + 1]; nz_idx++) { int row_ind = partitioned_row_offset[nz_idx]; //Tvp VPVal = vpvalue[row_ind]; assert(get_bitvector(row_ind, vpbit_vector)); Ta Aval = partitioned_val_offset[nz_idx]; if(get_bitvector(row_ind, ybit_vector)) { Ty tmp_mul; //Ty tmp_add; op_mul(Aval, xvalue[col_index], vpvalue[row_ind], &tmp_mul, vsp); op_add(yvalue[row_ind], tmp_mul, &yvalue[row_ind], vsp); //yvalue[row_ind] = tmp_add; } else { //Ty tmp_mul; //op_mul(Aval, Xval, VPVal, &tmp_mul, vsp); op_mul(Aval, xvalue[col_index], vpvalue[row_ind], &yvalue[row_ind], vsp); //yvalue[row_ind] = tmp_mul; set_bitvector(row_ind, ybit_vector); } } } } } for (int p = 0; p < num_partitions; p++) { // nnz += new_nnz[p]; } *nnz = m * n; } template <typename Ta, typename Tx, typename Tvp, typename Ty> void my_coospmspv3(Ta* a, int* ia, int* ja, int num_partitions, int * partition_starts, Tx* xvalue, int * xbit_vector, Tvp * vpvalue, int * vpbit_vector, Ty * yvalue, int * ybit_vector, int m, int n, int* nnz, void (*op_mul)(const Ta&, const Tx&, const Tvp&, Ty*, void*), void (*op_add)(const Ty&, const Ty&, Ty*, void*), void* vsp) { #pragma omp parallel for schedule(dynamic, 1) for(int partition = 0 ; partition < num_partitions ; partition++) { for(int nz = partition_starts[partition] ; nz < partition_starts[partition+1] ; nz++) { int row = ia[nz]-1; int col = ja[nz]-1; #ifdef __DEBUG assert(row < m); assert(row >= 0); assert(col < n); assert(col >= 0); #endif if(get_bitvector(col, xbit_vector)) { Tvp VPVal = vpvalue[row]; assert(get_bitvector(row, vpbit_vector)); Ty tmp_mul; op_mul(a[nz], xvalue[col], vpvalue[row], &tmp_mul, vsp); bool row_exists = get_bitvector(row, ybit_vector); if(!row_exists) { yvalue[row] = tmp_mul; } else { Ty tmp_add = yvalue[row]; Ty yval; op_add(tmp_add, tmp_mul, &yval, vsp); yvalue[row] = yval; } set_bitvector(row, ybit_vector); } } } } template <typename Ta, typename Tx, typename Tvp, typename Ty> void mult_segment3(const DCSCTile<Ta>* tile, const DenseSegment<Tx> * segmentx, const DenseSegment<Tvp> * segmentvp, DenseSegment<Ty> * segmenty, void (*mul_fp)(const Ta&, const Tx&, const Tvp&, Ty*, void*), void (*add_fp)(const Ty&, const Ty&, Ty*, void*), void* vsp) { segmenty->alloc(); segmenty->initialize(); 
int nnz = 0; my_spmspv3(tile->row_inds, tile->col_ptrs, tile->col_indices, tile->vals, tile->num_partitions, tile->row_pointers, tile->col_starts, tile->edge_pointers, segmentx->properties->value, segmentx->properties->bit_vector, segmentvp->properties->value, segmentvp->properties->bit_vector, segmenty->properties->value, segmenty->properties->bit_vector, tile->m, tile->n, (&nnz), mul_fp, add_fp, vsp); segmenty->properties->nnz = segmenty->compute_nnz(); } template <typename Ta, typename Tx, typename Tvp, typename Ty> void mult_segment3(const COOTile<Ta>* tile, const DenseSegment<Tx> * segmentx, const DenseSegment<Tvp> * segmentvp, DenseSegment<Ty>* segmenty, void (*mul_fp)(const Ta&, const Tx&, const Tvp&, Ty*, void*), void (*add_fp)(const Ty&, const Ty&, Ty*, void*), void* vsp) { segmenty->alloc(); segmenty->initialize(); int nnz = 0; my_coospmspv3(tile->a, tile->ia, tile->ja, tile->num_partitions, tile->partition_start, segmentx->properties->value, segmentx->properties->bit_vector, segmentvp->properties->value, segmentvp->properties->bit_vector, segmenty->properties->value, segmenty->properties->bit_vector, tile->m, tile->n, (&nnz), mul_fp, add_fp, vsp); segmenty->properties->nnz = segmenty->compute_nnz(); } #endif // SRC_SINGLENODE_SPMSPV3_H_
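A much-reduced C sketch of the control flow in my_spmspv3 above (assumptions: plain CSC storage, a single partition, double values, and my own bit-vector helpers rather than the GMDP ones): columns whose bit is clear in x are skipped, products are accumulated into y, and the output bit vector records which rows received a value. The original distributes disjoint partitions across OpenMP threads; this serial sketch omits that.

#include <stdint.h>

static inline int get_bit(int i, const uint32_t *bv) {
  return (bv[i >> 5] >> (i & 31)) & 1u;
}
static inline void set_bit(int i, uint32_t *bv) {
  bv[i >> 5] |= 1u << (i & 31);
}

/* ybits must be cleared by the caller; y entries are written on first use. */
void spmspv_sketch(int n, const int *col_ptr, const int *row_ind,
                   const double *val, const double *x, const uint32_t *xbits,
                   double *y, uint32_t *ybits) {
  for (int j = 0; j < n; j++) {
    if (!get_bit(j, xbits)) continue;        /* column not present in x */
    for (int p = col_ptr[j]; p < col_ptr[j + 1]; p++) {
      int i = row_ind[p];
      if (get_bit(i, ybits)) {
        y[i] += val[p] * x[j];               /* accumulate existing entry */
      } else {
        y[i] = val[p] * x[j];                /* first contribution to row i */
        set_bit(i, ybits);
      }
    }
  }
}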
lcs.c
/*
Sequential time:

Length of Longest Common Substring is 14
real 0m3,049s user 0m2,584s sys 0m0,459s
Length of Longest Common Substring is 14
real 0m3,047s user 0m2,704s sys 0m0,336s
Length of Longest Common Substring is 14
real 0m3,046s user 0m2,618s sys 0m0,423s
Length of Longest Common Substring is 14
real 0m3,016s user 0m2,602s sys 0m0,409s
Length of Longest Common Substring is 14
real 0m3,012s user 0m2,577s sys 0m0,429s

Parallel time:

Length of Longest Common Substring is 14
real 0m1,598s user 0m2,706s sys 0m0,417s
Length of Longest Common Substring is 14
real 0m1,647s user 0m2,736s sys 0m0,469s
Length of Longest Common Substring is 14
real 0m1,631s user 0m2,772s sys 0m0,413s
Length of Longest Common Substring is 14
real 0m1,698s user 0m2,809s sys 0m0,463s
Length of Longest Common Substring is 14
real 0m1,652s user 0m2,792s sys 0m0,425s

Speedup ~= 1.9080100125156445
*/

/* Dynamic Programming solution to find length of the longest common substring
   Adapted from http://www.geeksforgeeks.org/longest-common-substring/ */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>

// Read input files
char *readFile(char *filename, int *size) {
  char *buffer = NULL;
  *size = 0;

  /* Open your_file in read-only mode */
  FILE *fp = fopen(filename, "r");

  /* Get the buffer size */
  fseek(fp, 0, SEEK_END); /* Go to end of file */
  *size = ftell(fp);      /* How many bytes did we pass ? */

  /* Set position of stream to the beginning */
  rewind(fp);

  /* Allocate the buffer (no need to initialize it with calloc) */
  buffer = malloc((*size + 1) * sizeof(*buffer)); /* size + 1 byte for the \0 */

  /* Read the file into the buffer */
  int err = fread(buffer, *size, 1, fp); /* Read 1 chunk of size bytes from fp into buffer */

  /* NULL-terminate the buffer and close the file */
  buffer[*size] = '\0';
  fclose(fp);

  /* Print it ! */
  // printf("%s\n", buffer);

  return (buffer);
}

// A utility function to find maximum of two integers
int max(int a, int b) { return (a > b) ? a : b; }

/* Returns length of longest common substring of X[0..m-1] and Y[0..n-1] */
int LCSubStr(char *x, char *y, int m, int n) {
  // Create a table to store lengths of longest common suffixes of
  // substrings. Note that LCSuff[i][j] contains the length of the longest
  // common suffix of X[0..i-1] and Y[0..j-1]. The first row and
  // first column entries have no logical meaning; they are used only
  // for simplicity of the program
  int **LCSuff = (int **)malloc((m + 1) * sizeof(int *));
  for (int i = 0; i < m + 1; i++)
    LCSuff[i] = (int *)malloc((n + 1) * sizeof(int));

  int result = 0; // To store length of the longest common substring

  /* Following steps build LCSuff[m+1][n+1] in bottom up fashion. */
#pragma omp parallel for collapse(2) reduction(max:result) schedule(guided)
  for (int i = 0; i <= m; i++) {
    for (int j = 0; j <= n; j++) {
      if (i == 0 || j == 0)
        LCSuff[i][j] = 0;
      else if (x[i - 1] == y[j - 1]) {
        LCSuff[i][j] = LCSuff[i - 1][j - 1] + 1;
        result = max(result, LCSuff[i][j]);
      } else
        LCSuff[i][j] = 0;
    }
  }

  return result;
}

/* Driver program to test above function */
int main() {
  int m, n;
  char *x = readFile("seqA.txt", &m);
  char *y = readFile("seqB.txt", &n);

  omp_set_num_threads(2);

  printf("\nLength of Longest Common Substring is %d\n", LCSubStr(x, y, m, n));
  return 0;
}
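Because LCSuff[i][j] reads LCSuff[i-1][j-1], the iterations of the collapsed loop above are not mutually independent. A dependence-safe alternative (a sketch of mine, not the version that produced the timings above) sweeps anti-diagonals d = i + j, so every cell on a diagonal only needs values from diagonals that are already complete.

#include <stdlib.h>

/* Wavefront variant of the same DP: parallel within each anti-diagonal. */
int LCSubStr_wavefront(const char *x, const char *y, int m, int n) {
  int **S = (int **)malloc((m + 1) * sizeof(int *));
  for (int i = 0; i <= m; i++)
    S[i] = (int *)calloc(n + 1, sizeof(int)); /* row 0 / column 0 stay zero */

  int result = 0;
  for (int d = 2; d <= m + n; d++) {          /* d = i + j */
    int lo = (d - n > 1) ? d - n : 1;
    int hi = (d - 1 < m) ? d - 1 : m;
#pragma omp parallel for reduction(max : result)
    for (int i = lo; i <= hi; i++) {
      int j = d - i;
      if (x[i - 1] == y[j - 1]) {
        S[i][j] = S[i - 1][j - 1] + 1;        /* diagonal d-2: already done */
        if (S[i][j] > result) result = S[i][j];
      } else {
        S[i][j] = 0;
      }
    }
  }

  for (int i = 0; i <= m; i++) free(S[i]);
  free(S);
  return result;
}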
GB_unop__minv_uint8_uint8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__minv_uint8_uint8) // op(A') function: GB (_unop_tran__minv_uint8_uint8) // C type: uint8_t // A type: uint8_t // cast: uint8_t cij = aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 8) #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 8) ; // casting #define GB_CAST(z, aij) \ uint8_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint8_t z = aij ; \ Cx [pC] = GB_IMINV_UNSIGNED (z, 8) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__minv_uint8_uint8) ( uint8_t *Cx, // Cx and Ax may be aliased const uint8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; uint8_t z = aij ; Cx [p] = GB_IMINV_UNSIGNED (z, 8) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint8_t aij = Ax [p] ; uint8_t z = aij ; Cx [p] = GB_IMINV_UNSIGNED (z, 8) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__minv_uint8_uint8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
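Stripped of the GraphBLAS macro machinery, the apply kernel above has this generic shape (a sketch; unop8 and the function name are mine, not the GraphBLAS API): iterate over all anz slots, skip holes when a bitmap is present, and apply the unary operator with a static OpenMP schedule.

#include <stdint.h>

typedef uint8_t (*unop8)(uint8_t);

static void unop_apply_sketch(uint8_t *Cx, const uint8_t *Ax,
                              const int8_t *Ab, int64_t anz,
                              unop8 f, int nthreads) {
  int64_t p;
  if (Ab == NULL) {
    /* dense case: every entry is present */
#pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++) Cx[p] = f(Ax[p]);
  } else {
    /* bitmap case: Ab[p] != 0 marks a stored entry */
#pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++) {
      if (!Ab[p]) continue;
      Cx[p] = f(Ax[p]);
    }
  }
}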
imd_deform.c
/****************************************************************************** * * IMD -- The ITAP Molecular Dynamics Program * * Copyright 1996-2007 Institute for Theoretical and Applied Physics, * University of Stuttgart, D-70550 Stuttgart * ******************************************************************************/ /****************************************************************************** * * imd_deform.c -- deform sample * ******************************************************************************/ /****************************************************************************** * $Revision$ * $Date$ ******************************************************************************/ #include "imd.h" #ifdef HOMDEF /* homogeneous deformation with pbc */ /***************************************************************************** * * lin_deform() * *****************************************************************************/ #ifdef TWOD void lin_deform(vektor dx, vektor dy, real scale) #else void lin_deform(vektor dx, vektor dy, vektor dz, real scale) #endif { int k; real tmpbox[3]; #ifdef DEBUG printf("in lindef, myid: %d scale %lf dx %lf %lf %lf dy %lf %lf %lf dz %lf %lf %lf \n",myid,scale,dx.x,dx.y,dx.z,dy.x,dy.y,dy.z,dz.x,dz.y,dz.z);fflush(stdout); #endif #ifdef _OPENMP #pragma omp parallel for #endif for (k=0; k<NCELLS; ++k) { int i; cell *p; real tmport[3]; p = CELLPTR(k); for (i=0; i<p->n; ++i) { /* transform atom positions */ #ifdef TWOD tmport[0] = dx.x * ORT(p,i,X) + dx.y * ORT(p,i,Y); tmport[1] = dy.x * ORT(p,i,X) + dy.y * ORT(p,i,Y); #else tmport[0] = dx.x * ORT(p,i,X) + dx.y * ORT(p,i,Y) + dx.z * ORT(p,i,Z); tmport[1] = dy.x * ORT(p,i,X) + dy.y * ORT(p,i,Y) + dy.z * ORT(p,i,Z); tmport[2] = dz.x * ORT(p,i,X) + dz.y * ORT(p,i,Y) + dz.z * ORT(p,i,Z); #endif ORT(p,i,X) += scale * tmport[0]; ORT(p,i,Y) += scale * tmport[1]; #ifndef TWOD ORT(p,i,Z) += scale * tmport[2]; #endif } } /* transform first box vector */ tmpbox[0] = scale * SPROD(dx,box_x); tmpbox[1] = scale * SPROD(dy,box_x); #ifndef TWOD tmpbox[2] = scale * SPROD(dz,box_x); #endif box_x.x += tmpbox[0]; box_x.y += tmpbox[1]; #ifndef TWOD box_x.z += tmpbox[2]; #endif /* transform second box vector */ tmpbox[0] = scale * SPROD(dx,box_y); tmpbox[1] = scale * SPROD(dy,box_y); #ifndef TWOD tmpbox[2] = scale * SPROD(dz,box_y); #endif box_y.x += tmpbox[0]; box_y.y += tmpbox[1]; #ifndef TWOD box_y.z += tmpbox[2]; #endif /* transform third box vector */ #ifndef TWOD tmpbox[0] = scale * SPROD(dx,box_z); tmpbox[1] = scale * SPROD(dy,box_z); tmpbox[2] = scale * SPROD(dz,box_z); box_z.x += tmpbox[0]; box_z.y += tmpbox[1]; box_z.z += tmpbox[2]; #endif /* apply box changes */ make_box(); #ifdef DAMP /* deform the stadium correspondingly */ tmpbox[0] = scale * SPROD(dx,center); tmpbox[1] = scale * SPROD(dy,center); #ifndef TWOD tmpbox[2] = scale * SPROD(dz,center); #endif #endif } /* lin_deform */ /***************************************************************************** * * relax_pressure() * *****************************************************************************/ void relax_pressure() { real Epot, Temp, vol; #ifdef TWOD vektor dx = {0.0, 0.0}, dy = {0.0, 0.0}; #else vektor dx = {0.0, 0.0, 0.0}, dy = {0.0, 0.0, 0.0}, dz = {0.0, 0.0, 0.0}; #endif sym_tensor pt; real pp; #ifdef STRESS_TENS /* here, we support relaxation to arbitrary external pressure tensor */ calc_tot_presstens(); pt.xx = tot_presstens.xx / volume - presstens_ext.xx; pt.yy = tot_presstens.yy / volume - presstens_ext.yy; pt.xy = tot_presstens.xy / 
volume - presstens_ext.xy; #ifndef TWOD pt.zz = tot_presstens.zz / volume - presstens_ext.zz; pt.yz = tot_presstens.yz / volume - presstens_ext.yz; pt.zx = tot_presstens.zx / volume - presstens_ext.zx; /* we want to relax stresses e.g. orthogonal to a strained direction */ /* pp = (pt.xx + pt.yy + pt.zz) / 3.0; */ pp = (pt.xx*relax_dirs.x + pt.yy*relax_dirs.y + pt.zz*relax_dirs.z) / (relax_dirs.x+ relax_dirs.y+relax_dirs.z); #else pp = (pt.xx + pt.yy) / 2.0; #endif if ((relax_mode == RELAX_FULL) || (relax_mode == RELAX_AXIAL)) { dx.x = pp / bulk_module + (pt.xx - pp) / shear_module; dy.y = pp / bulk_module + (pt.yy - pp) / shear_module; #ifndef TWOD dz.z = pp / bulk_module + (pt.zz - pp) / shear_module; #endif } else { dx.x = pp / bulk_module; dy.y = pp / bulk_module; #ifndef TWOD dz.z = pp / bulk_module; #endif } if (relax_mode == RELAX_FULL) { dx.y = dy.x = pt.xy / shear_module; #ifndef TWOD dy.z = dz.y = pt.yz / shear_module; dz.x = dx.z = pt.zx / shear_module; #endif } #else /* not STRESS_TENS */ if (ensemble == ENS_CG) { Temp = 0.0; } else { #ifdef UNIAX Temp = 2.0 * tot_kin_energy / (nactive + nactive_rot); #elif defined(DAMP) Temp = 2.0 * tot_kin_energy / (nactive - n_damp); #else Temp = 2.0 * tot_kin_energy / nactive; #endif } #ifdef STM Temp = 2.0 * tot_kin_energy / (nactive - n_stadium); #endif vol = volume / natoms; pressure = Temp / vol + virial / (DIM * volume); /* here, we support only relaxation to scalar pressure zero */ dx.x = pressure / bulk_module ; dy.y = pressure / bulk_module ; #ifndef TWOD dz.z = pressure / bulk_module ; #endif #endif /* not STRESS_TENS */ if (relax_mode == RELAX_AXIAL) { dx.x *= relax_dirs.x; dy.y *= relax_dirs.y; #ifndef TWOD dz.z *= relax_dirs.z; #endif } #ifdef TWOD lin_deform(dx, dy, relax_rate); #else lin_deform(dx, dy, dz, relax_rate); #endif } #endif /* HOMDEF */ #ifdef DEFORM /***************************************************************************** * * deform_sample() * *****************************************************************************/ void deform_sample(void) { int k; /* loop over all atoms */ #ifdef _OPENMP #pragma omp parallel for #endif for (k=0; k<NCELLS; ++k) { int i; cell *p; int sort; vektor ort; real shear; p = CELLPTR(k); for (i=0; i<p->n; ++i) { sort = VSORTE(p,i); if ( *(shear_def + sort) == 1 ) { ort.x = ORT(p,i,X) - (deform_base + sort)->x; ort.y = ORT(p,i,Y) - (deform_base + sort)->y; #ifndef TWOD ort.z = ORT(p,i,Z) - (deform_base + sort)->z; #endif shear = SPROD( *(deform_shear + sort), ort); } else shear = 1.0; /* move particles with virtual types */ ORT(p,i,X) += shear * deform_size * (deform_shift + sort)->x; ORT(p,i,Y) += shear * deform_size * (deform_shift + sort)->y; #ifndef TWOD ORT(p,i,Z) += shear * deform_size * (deform_shift + sort)->z; #endif } } } #endif /* DEFORM */ #ifdef CYCLE void init_cycle(void) { int i, n; double T,A,a; if (0==myid) { T=1.0/lindef_freq; printf("CYCLE: cyclic deformation with frequency f = %f -> T = 1/f = %f\n",lindef_freq,T); printf("CYCLE: desired strain amplitude: %f\n",lindef_size); n = (int) (T/4/timestep); printf("CYCLE: -> timesteps till max. strain: %d\n",n); if (n*timestep*4.0 != T) error("CYCLE: 1/(4*lindef_freq) is not an integer value."); } } #endif
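A flat-array sketch of the core of lin_deform() above (illustrative only; IMD actually stores positions in per-cell arrays and has separate 2D/3D variants): every particle position receives x_i += scale * (D x_i), and the same affine update is then applied to the box vectors.

/* Sketch: apply a scaled linear deformation D to n particle positions. */
void lin_deform_sketch(int n, double *x, double *y, double *z,
                       const double D[3][3], double scale) {
  int i;
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (i = 0; i < n; i++) {
    double tx = D[0][0] * x[i] + D[0][1] * y[i] + D[0][2] * z[i];
    double ty = D[1][0] * x[i] + D[1][1] * y[i] + D[1][2] * z[i];
    double tz = D[2][0] * x[i] + D[2][1] * y[i] + D[2][2] * z[i];
    x[i] += scale * tx;
    y[i] += scale * ty;
    z[i] += scale * tz;
    /* the box vectors would receive the same update outside this loop */
  }
}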
GB_AxB_saxpy3_symbolic_template.c
//------------------------------------------------------------------------------ // GB_AxB_saxpy3_symbolic_template: symbolic analysis for GB_AxB_saxpy3 //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Symbolic analysis for C=A*B, C<M>=A*B or C<!M>=A*B, via GB_AxB_saxpy3. // Coarse tasks compute nnz (C (:,j)) for each of their vectors j. Fine tasks // just scatter the mask M into the hash table. This phase does not depend on // the semiring, nor does it depend on the type of C, A, or B. It does access // the values of M, if the mask matrix M is present and not structural. // If B is hypersparse, C must also be hypersparse. // Otherwise, C must be sparse. // The sparsity of A and B are #defined' constants for this method, // as is the 3 cases of the mask (no M, M, or !M). #include "GB_AxB_saxpy3.h" #include "GB_AxB_saxpy3_template.h" #include "GB_atomics.h" #include "GB_bracket.h" #include "GB_unused.h" #define GB_META16 #include "GB_meta16_definitions.h" void GB_EVAL2 (GB (AxB_saxpy3_sym), GB_MASK_A_B_SUFFIX) ( GrB_Matrix C, // Cp is computed for coarse tasks #if ( !GB_NO_MASK ) const GrB_Matrix M, // mask matrix M const bool Mask_struct, // M structural, or not const bool M_in_place, #endif const GrB_Matrix A, // A matrix; only the pattern is accessed const GrB_Matrix B, // B matrix; only the pattern is accessed GB_saxpy3task_struct *SaxpyTasks, // list of tasks, and workspace const int ntasks, // total number of tasks const int nfine, // number of fine tasks const int nthreads // number of threads ) { //-------------------------------------------------------------------------- // get M, A, B, and C //-------------------------------------------------------------------------- int64_t *restrict Cp = C->p ; const int64_t cvlen = C->vlen ; const int64_t *restrict Bp = B->p ; const int64_t *restrict Bh = B->h ; const int8_t *restrict Bb = B->b ; const int64_t *restrict Bi = B->i ; const int64_t bvlen = B->vlen ; const bool B_jumbled = B->jumbled ; ASSERT (GB_B_IS_SPARSE == GB_IS_SPARSE (B)) ; ASSERT (GB_B_IS_HYPER == GB_IS_HYPERSPARSE (B)) ; ASSERT (GB_B_IS_BITMAP == GB_IS_BITMAP (B)) ; ASSERT (GB_B_IS_FULL == GB_IS_FULL (B)) ; const int64_t *restrict Ap = A->p ; const int64_t *restrict Ah = A->h ; const int8_t *restrict Ab = A->b ; const int64_t *restrict Ai = A->i ; const int64_t anvec = A->nvec ; const int64_t avlen = A->vlen ; const bool A_jumbled = A->jumbled ; ASSERT (GB_A_IS_SPARSE == GB_IS_SPARSE (A)) ; ASSERT (GB_A_IS_HYPER == GB_IS_HYPERSPARSE (A)) ; ASSERT (GB_A_IS_BITMAP == GB_IS_BITMAP (A)) ; ASSERT (GB_A_IS_FULL == GB_IS_FULL (A)) ; #if ( !GB_NO_MASK ) const int64_t *restrict Mp = M->p ; const int64_t *restrict Mh = M->h ; const int8_t *restrict Mb = M->b ; const int64_t *restrict Mi = M->i ; const GB_void *restrict Mx = (GB_void *) (Mask_struct ? NULL : (M->x)) ; size_t msize = M->type->size ; int64_t mnvec = M->nvec ; int64_t mvlen = M->vlen ; const bool M_is_hyper = GB_IS_HYPERSPARSE (M) ; const bool M_is_bitmap = GB_IS_BITMAP (M) ; const bool M_jumbled = GB_JUMBLED (M) ; #endif //========================================================================== // phase1: count nnz(C(:,j)) for coarse tasks, scatter M for fine tasks //========================================================================== // At this point, all of Hf [...] 
is zero, for all tasks. // Hi and Hx are not initialized. int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(static,1) for (taskid = 0 ; taskid < ntasks ; taskid++) { //---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- int64_t hash_size = SaxpyTasks [taskid].hsize ; bool use_Gustavson = (hash_size == cvlen) ; if (taskid < nfine) { //------------------------------------------------------------------ // no work for fine tasks in phase1 if M is not present //------------------------------------------------------------------ #if ( !GB_NO_MASK ) { //-------------------------------------------------------------- // get the task descriptor //-------------------------------------------------------------- int64_t kk = SaxpyTasks [taskid].vector ; int64_t bjnz = (Bp == NULL) ? bvlen : (Bp [kk+1] - Bp [kk]) ; // no work to do if B(:,j) is empty if (bjnz == 0) continue ; // partition M(:,j) GB_GET_M_j ; // get M(:,j) int team_size = SaxpyTasks [taskid].team_size ; int leader = SaxpyTasks [taskid].leader ; int my_teamid = taskid - leader ; int64_t mystart, myend ; GB_PARTITION (mystart, myend, mjnz, my_teamid, team_size) ; mystart += pM_start ; myend += pM_start ; if (use_Gustavson) { //---------------------------------------------------------- // phase1: fine Gustavson task, C<M>=A*B or C<!M>=A*B //---------------------------------------------------------- // Scatter the values of M(:,j) into Hf. No atomics needed // since all indices i in M(;,j) are unique. Do not // scatter the mask if M(:,j) is a dense vector, since in // that case the numeric phase accesses M(:,j) directly, // not via Hf. if (mjnz > 0) { int8_t *restrict Hf = (int8_t *restrict) SaxpyTasks [taskid].Hf ; GB_SCATTER_M_j (mystart, myend, 1) ; } } else if (!M_in_place) { //---------------------------------------------------------- // phase1: fine hash task, C<M>=A*B or C<!M>=A*B //---------------------------------------------------------- // If M_in_place is true, this is skipped. The mask // M is dense, and is used in-place. // The least significant 2 bits of Hf [hash] is the flag f, // and the upper bits contain h, as (h,f). After this // phase1, if M(i,j)=1 then the hash table contains // ((i+1),1) in Hf [hash] at some location. // Later, the flag values of f = 2 and 3 are also used. // Only f=1 is set in this phase. // h == 0, f == 0: unoccupied and unlocked // h == i+1, f == 1: occupied with M(i,j)=1 int64_t *restrict Hf = (int64_t *restrict) SaxpyTasks [taskid].Hf ; int64_t hash_bits = (hash_size-1) ; // scan my M(:,j) for (int64_t pM = mystart ; pM < myend ; pM++) { GB_GET_M_ij (pM) ; // get M(i,j) if (!mij) continue ; // skip if M(i,j)=0 int64_t i = GBI (Mi, pM, mvlen) ; int64_t i_mine = ((i+1) << 2) + 1 ; // ((i+1),1) for (GB_HASH (i)) { int64_t hf ; // swap my hash entry into the hash table; // does the following using an atomic capture: // { hf = Hf [hash] ; Hf [hash] = i_mine ; } GB_ATOMIC_CAPTURE_INT64 (hf, Hf [hash], i_mine) ; if (hf == 0) break ; // success // i_mine has been inserted, but a prior entry was // already there. It needs to be replaced, so take // ownership of this displaced entry, and keep // looking until a new empty slot is found for it. 
i_mine = hf ; } } } } #endif } else { //------------------------------------------------------------------ // coarse tasks: compute nnz in each vector of A*B(:,kfirst:klast) //------------------------------------------------------------------ int64_t *restrict Hf = (int64_t *restrict) SaxpyTasks [taskid].Hf ; int64_t kfirst = SaxpyTasks [taskid].start ; int64_t klast = SaxpyTasks [taskid].end ; int64_t mark = 0 ; if (use_Gustavson) { //-------------------------------------------------------------- // phase1: coarse Gustavson task //-------------------------------------------------------------- #if ( GB_NO_MASK ) { // phase1: coarse Gustavson task, C=A*B #include "GB_AxB_saxpy3_coarseGus_noM_phase1.c" } #elif ( !GB_MASK_COMP ) { // phase1: coarse Gustavson task, C<M>=A*B #include "GB_AxB_saxpy3_coarseGus_M_phase1.c" } #else { // phase1: coarse Gustavson task, C<!M>=A*B #include "GB_AxB_saxpy3_coarseGus_notM_phase1.c" } #endif } else { //-------------------------------------------------------------- // phase1: coarse hash task //-------------------------------------------------------------- int64_t *restrict Hi = SaxpyTasks [taskid].Hi ; int64_t hash_bits = (hash_size-1) ; #if ( GB_NO_MASK ) { //---------------------------------------------------------- // phase1: coarse hash task, C=A*B //---------------------------------------------------------- #undef GB_CHECK_MASK_ij #include "GB_AxB_saxpy3_coarseHash_phase1.c" } #elif ( !GB_MASK_COMP ) { //---------------------------------------------------------- // phase1: coarse hash task, C<M>=A*B //---------------------------------------------------------- if (M_in_place) { //------------------------------------------------------ // M(:,j) is dense. M is not scattered into Hf. //------------------------------------------------------ #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ bool mij = \ (M_is_bitmap ? Mjb [i] : 1) && \ (Mask_struct ? 1 : (Mjx [i] != 0)) ; \ if (!mij) continue ; switch (msize) { default: case GB_1BYTE : #undef M_TYPE #define M_TYPE uint8_t #undef M_SIZE #define M_SIZE 1 #include "GB_AxB_saxpy3_coarseHash_phase1.c" break ; case GB_2BYTE : #undef M_TYPE #define M_TYPE uint16_t #include "GB_AxB_saxpy3_coarseHash_phase1.c" break ; case GB_4BYTE : #undef M_TYPE #define M_TYPE uint32_t #include "GB_AxB_saxpy3_coarseHash_phase1.c" break ; case GB_8BYTE : #undef M_TYPE #define M_TYPE uint64_t #include "GB_AxB_saxpy3_coarseHash_phase1.c" break ; case GB_16BYTE : #undef M_TYPE #define M_TYPE uint64_t #undef M_SIZE #define M_SIZE 2 #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ bool mij = \ (M_is_bitmap ? Mjb [i] : 1) && \ (Mask_struct ? 1 : \ (Mjx [2*i] != 0) || \ (Mjx [2*i+1] != 0)) ; \ if (!mij) continue ; #include "GB_AxB_saxpy3_coarseHash_phase1.c" break ; } } else { //------------------------------------------------------ // M is sparse and scattered into Hf //------------------------------------------------------ #include "GB_AxB_saxpy3_coarseHash_M_phase1.c" } } #else { //---------------------------------------------------------- // phase1: coarse hash task, C<!M>=A*B //---------------------------------------------------------- if (M_in_place) { //------------------------------------------------------ // M(:,j) is dense. M is not scattered into Hf. //------------------------------------------------------ #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ bool mij = \ (M_is_bitmap ? Mjb [i] : 1) && \ (Mask_struct ? 
1 : (Mjx [i] != 0)) ; \ if (mij) continue ; switch (msize) { default: case GB_1BYTE : #undef M_TYPE #define M_TYPE uint8_t #undef M_SIZE #define M_SIZE 1 #include "GB_AxB_saxpy3_coarseHash_phase1.c" break ; case GB_2BYTE : #undef M_TYPE #define M_TYPE uint16_t #include "GB_AxB_saxpy3_coarseHash_phase1.c" break ; case GB_4BYTE : #undef M_TYPE #define M_TYPE uint32_t #include "GB_AxB_saxpy3_coarseHash_phase1.c" break ; case GB_8BYTE : #undef M_TYPE #define M_TYPE uint64_t #include "GB_AxB_saxpy3_coarseHash_phase1.c" break ; case GB_16BYTE : #undef M_TYPE #define M_TYPE uint64_t #undef M_SIZE #define M_SIZE 2 #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ bool mij = \ (M_is_bitmap ? Mjb [i] : 1) && \ (Mask_struct ? 1 : \ (Mjx [2*i] != 0) || \ (Mjx [2*i+1] != 0)) ; \ if (mij) continue ; #include "GB_AxB_saxpy3_coarseHash_phase1.c" break ; } } else { //------------------------------------------------------ // M is sparse and scattered into Hf //------------------------------------------------------ #include "GB_AxB_saxpy3_coarseHash_notM_phase1.c" } } #endif } } } //-------------------------------------------------------------------------- // check result for phase1 for fine tasks //-------------------------------------------------------------------------- #ifdef GB_DEBUG #if ( !GB_NO_MASK ) { for (taskid = 0 ; taskid < nfine ; taskid++) { int64_t kk = SaxpyTasks [taskid].vector ; ASSERT (kk >= 0 && kk < B->nvec) ; int64_t bjnz = (Bp == NULL) ? bvlen : (Bp [kk+1] - Bp [kk]) ; // no work to do if B(:,j) is empty if (bjnz == 0) continue ; int64_t hash_size = SaxpyTasks [taskid].hsize ; bool use_Gustavson = (hash_size == cvlen) ; int leader = SaxpyTasks [taskid].leader ; if (leader != taskid) continue ; GB_GET_M_j ; // get M(:,j) if (mjnz == 0) continue ; int64_t mjcount2 = 0 ; int64_t mjcount = 0 ; for (int64_t pM = pM_start ; pM < pM_end ; pM++) { GB_GET_M_ij (pM) ; // get M(i,j) if (mij) mjcount++ ; } if (use_Gustavson) { // phase1: fine Gustavson task, C<M>=A*B or C<!M>=A*B int8_t *restrict Hf = (int8_t *restrict) SaxpyTasks [taskid].Hf ; for (int64_t pM = pM_start ; pM < pM_end ; pM++) { GB_GET_M_ij (pM) ; // get M(i,j) int64_t i = GBI (Mi, pM, mvlen) ; ASSERT (Hf [i] == mij) ; } for (int64_t i = 0 ; i < cvlen ; i++) { ASSERT (Hf [i] == 0 || Hf [i] == 1) ; if (Hf [i] == 1) mjcount2++ ; } ASSERT (mjcount == mjcount2) ; } else if (!M_in_place) { // phase1: fine hash task, C<M>=A*B or C<!M>=A*B // h == 0, f == 0: unoccupied and unlocked // h == i+1, f == 1: occupied with M(i,j)=1 int64_t *restrict Hf = (int64_t *restrict) SaxpyTasks [taskid].Hf ; int64_t hash_bits = (hash_size-1) ; for (int64_t pM = pM_start ; pM < pM_end ; pM++) { GB_GET_M_ij (pM) ; // get M(i,j) if (!mij) continue ; // skip if M(i,j)=0 int64_t i = GBI (Mi, pM, mvlen) ; int64_t i_mine = ((i+1) << 2) + 1 ; // ((i+1),1) int64_t probe = 0 ; for (GB_HASH (i)) { int64_t hf = Hf [hash] ; if (hf == i_mine) { mjcount2++ ; break ; } ASSERT (hf != 0) ; probe++ ; ASSERT (probe < cvlen) ; } } ASSERT (mjcount == mjcount2) ; mjcount2 = 0 ; for (int64_t hash = 0 ; hash < hash_size ; hash++) { int64_t hf = Hf [hash] ; int64_t h = (hf >> 2) ; // empty (0), or a 1-based int64_t f = (hf & 3) ; // 0 if empty or 1 if occupied if (f == 1) ASSERT (h >= 1 && h <= cvlen) ; ASSERT (hf == 0 || f == 1) ; if (f == 1) mjcount2++ ; } ASSERT (mjcount == mjcount2) ; } } } #endif #endif }
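
The fine hash task in phase1 above stores each scattered mask entry as a single 64-bit value: the upper bits hold h = i+1 (a 1-based row index) and the two low bits hold the flag f, with hf == 0 meaning an unoccupied, unlocked slot (f = 2 and 3 are only used in later phases). A minimal sketch of just that encoding, under the assumption of stand-alone helpers; hf_pack, hf_index and hf_flag are illustrative names, not GraphBLAS functions.

#include <stdint.h>
#include <assert.h>

/* Pack a 0-based row index i and a 2-bit flag f into one hash entry:
 * hf = ((i+1) << 2) + f.  Any occupied entry (i >= 0, f == 1) is
 * nonzero, so the all-zero value can mark an empty slot. */
static inline int64_t hf_pack (int64_t i, int64_t f) { return ((i + 1) << 2) + f; }
static inline int64_t hf_index(int64_t hf)           { return (hf >> 2) - 1; }
static inline int64_t hf_flag (int64_t hf)           { return hf & 3; }

int main(void)
{
  int64_t hf = hf_pack(41, 1);   /* M(41,j)=1 scattered into Hf      */
  assert(hf_index(hf) == 41);    /* recover the row index            */
  assert(hf_flag(hf)  == 1);     /* phase1 only ever sets f = 1      */
  return 0;
}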
DRB047-doallchar-orig-no.c
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory

Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)

LLNL-CODE-732144
All rights reserved.

This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the disclaimer below.

* Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the disclaimer (as noted below)
  in the documentation and/or other materials provided with the
  distribution.

* Neither the name of the LLNS/LLNL nor the names of its contributors
  may be used to endorse or promote products derived from this software
  without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE
NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <stdio.h>
#include <stdlib.h>

/* One-dimensional array computation with finer granularity than the
   traditional 4 bytes. Dynamic tools monitoring 4-byte elements may
   wrongfully report a race condition. */

char a[100];

int main()
{
  int i;

#pragma omp target data map(tofrom:a[0:100])
#pragma omp target parallel for
  for (i=0;i<100;i++)
    a[i]=i;

#pragma omp target data map(tofrom:a[0:100])
#pragma omp target parallel for
  for (i=0;i<100;i++)
    a[i]=a[i]+1;

  for (i=0;i<100;i++)
    printf("%c\n",a[i]);

  return 0;
}
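
The point of this benchmark is that every iteration writes a distinct char element, so the loops are race-free even though several adjacent elements share one 4-byte word; a tool that tracks memory at word granularity may therefore report a false positive. A minimal host-only sketch of the same access pattern, assuming the target-offload directives are dropped:

#include <stdio.h>

int main(void)
{
  char a[100];
  int i;

  /* each iteration touches exactly one byte: no data race, even
     though e.g. a[0..3] share a single 4-byte word */
#pragma omp parallel for
  for (i = 0; i < 100; i++)
    a[i] = (char)i;

#pragma omp parallel for
  for (i = 0; i < 100; i++)
    a[i] = a[i] + 1;

  for (i = 0; i < 100; i++)
    printf("%d\n", a[i]);

  return 0;
}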
GB_binop__islt_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__islt_int32) // A.*B function (eWiseMult): GB (_AemultB_08__islt_int32) // A.*B function (eWiseMult): GB (_AemultB_02__islt_int32) // A.*B function (eWiseMult): GB (_AemultB_04__islt_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_int32) // A*D function (colscale): GB (_AxD__islt_int32) // D*A function (rowscale): GB (_DxB__islt_int32) // C+=B function (dense accum): GB (_Cdense_accumB__islt_int32) // C+=b function (dense accum): GB (_Cdense_accumb__islt_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_int32) // C=scalar+B GB (_bind1st__islt_int32) // C=scalar+B' GB (_bind1st_tran__islt_int32) // C=A+scalar GB (_bind2nd__islt_int32) // C=A'+scalar GB (_bind2nd_tran__islt_int32) // C type: int32_t // A type: int32_t // A pattern? 0 // B type: int32_t // B pattern? 0 // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLT || GxB_NO_INT32 || GxB_NO_ISLT_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__islt_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__islt_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__islt_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__islt_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__islt_int32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__islt_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int32_t alpha_scalar ; int32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int32_t *) alpha_scalar_in)) ; beta_scalar 
= (*((int32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__islt_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__islt_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__islt_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__islt_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__islt_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__islt_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__islt_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads 
) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__islt_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
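
The _bind1st and _bind2nd kernels above apply the ISLT operator with one argument bound to a scalar, skipping entries that an optional bitmap marks as absent. A minimal sketch of the same loop on plain arrays, assuming no iso-valued or typecasting handling; islt_bind1st is an illustrative name, not a GraphBLAS entry point.

#include <stdint.h>
#include <stdio.h>

/* Cx [p] = (x < Bx [p]) for every entry present in the optional
 * bitmap Bb (Bb == NULL means all entries are present). */
void islt_bind1st(int32_t *Cx, int32_t x, const int32_t *Bx,
                  const int8_t *Bb, int64_t bnz, int nthreads)
{
  int64_t p;
#pragma omp parallel for num_threads(nthreads) schedule(static)
  for (p = 0; p < bnz; p++)
  {
    if (Bb != NULL && !Bb[p]) continue;   /* entry not present */
    Cx[p] = (x < Bx[p]);
  }
}

int main(void)
{
  int32_t B[4] = { -1, 2, 5, 7 };
  int32_t C[4];
  islt_bind1st(C, 3, B, NULL, 4, 2);
  for (int i = 0; i < 4; i++) printf("%d ", C[i]);   /* prints: 0 0 1 1 */
  printf("\n");
  return 0;
}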
streamer.c
#include "grid.h" #include "config.h" #include "streamer.h" #include <hdf5.h> #include <stdlib.h> #include <unistd.h> #include <math.h> #include <complex.h> #include <string.h> #include <omp.h> #include <float.h> #include <pthread.h> #include <sys/mman.h> #include <sys/wait.h> struct streamer_chunk *writer_push_slot(struct streamer_writer *writer, struct bl_data *bl_data, int tchunk, int fchunk) { if (!writer) return NULL; // Determine our slot (competing with other tasks, so need to have // critical section here) int vis_slot; #pragma omp critical { vis_slot = writer->in_ptr; writer->in_ptr = (writer->in_ptr + 1) % writer->queue_length; } // Obtain lock for writing data (writer thread might not have // written this data to disk yet) struct streamer_chunk *chunk = writer->queue + vis_slot; #pragma omp atomic writer->to_write += 1; #ifndef __APPLE__ sem_wait(&chunk->in_lock); #else dispatch_semaphore_wait(chunk->in_lock, DISPATCH_TIME_FOREVER); #endif // Set slot data chunk->bl_data = bl_data; chunk->tchunk = tchunk; chunk->fchunk = fchunk; return chunk; } void streamer_ireceive(struct streamer *streamer, int subgrid_work, int slot) { const int xM_yN_size = streamer->work_cfg->recombine.xM_yN_size; struct subgrid_work *work = streamer->work_cfg->subgrid_work + streamer->subgrid_worker * streamer->work_cfg->subgrid_max_work; const int facet_count = streamer->work_cfg->facet_workers * streamer->work_cfg->facet_max_work; // Not populated or marked to skip? Clear slot if (!work[subgrid_work].nbl || streamer->skip_receive[subgrid_work]) { int facet; for (facet = 0; facet < facet_count; facet++) { *request_slot(streamer, slot, facet) = MPI_REQUEST_NULL; } streamer->request_work[slot] = -1; return; } // Set work streamer->request_work[slot] = subgrid_work; // Mark later subgrid repeats for skipping int iw; for (iw = subgrid_work+1; iw < streamer->work_cfg->subgrid_max_work; iw++) if (work[iw].iu == work[subgrid_work].iu && work[iw].iv == work[subgrid_work].iv && work[iw].iw == work[subgrid_work].iw) streamer->skip_receive[iw] = true; // Walk through all facets we expect contributions from, save requests const int facets = streamer->work_cfg->facet_workers * streamer->work_cfg->facet_max_work; int facet; for (facet = 0; facet < facets; facet++) { struct facet_work *fwork = streamer->work_cfg->facet_work + facet; if (!fwork->set) { *request_slot(streamer, slot, facet) = MPI_REQUEST_NULL; continue; } #ifndef NO_MPI // Set up a receive slot with appropriate tag const int facet_worker = facet / streamer->work_cfg->facet_max_work; const int facet_work = facet % streamer->work_cfg->facet_max_work; const int tag = make_subgrid_tag(streamer->work_cfg, streamer->subgrid_worker, subgrid_work, facet_worker, facet_work); //printf("Receiving iu=%d iv=%d iw=%d tag=%d facet=%d\n", // work[subgrid_work].iu, work[subgrid_work].iv, work[subgrid_work].iw, tag, facet_work); MPI_Irecv(nmbf_slot(streamer, slot, facet), xM_yN_size * xM_yN_size, MPI_DOUBLE_COMPLEX, streamer->producer_ranks[facet_worker], tag, MPI_COMM_WORLD, request_slot(streamer, slot, facet)); #endif } } void streamer_work(struct streamer *streamer, int subgrid_work, double complex *nmbf); int streamer_receive_a_subgrid(struct streamer *streamer, struct subgrid_work *work, int *waitsome_indices) { const int facet_work_count = streamer->work_cfg->facet_workers * streamer->work_cfg->facet_max_work; double start = get_time_ns(); int slot; int waiting = 0; for(;;) { // Wait for MPI data to arrive. 
We only use "Waitsome" if this // is not the first iteration *and* we know that there are // actually waiting requests. Waitsome might block // indefinetely - so we should only do that if we are sure // that we need to receive *something* before we get more work // to do. int index_count = 0; if (facet_work_count > 0) { if (waiting == 0) { MPI_Testsome(facet_work_count * streamer->queue_length, streamer->request_queue, &index_count, waitsome_indices, MPI_STATUSES_IGNORE); } else { MPI_Waitsome(facet_work_count * streamer->queue_length, streamer->request_queue, &index_count, waitsome_indices, MPI_STATUSES_IGNORE); } } // Note how much data was received int i; if (index_count > 0) { #pragma omp atomic streamer->received_data += index_count * streamer->work_cfg->recombine.NMBF_NMBF_size; } // Check that the indicated requests were actually cleared // (this behaviour isn't exactly prominently documented?). for (i = 0; i < index_count; i++) { assert(streamer->request_queue[waitsome_indices[i]] == MPI_REQUEST_NULL); streamer->request_queue[waitsome_indices[i]] = MPI_REQUEST_NULL; } // Find finished slot waiting = 0; for (slot = 0; slot < streamer->queue_length; slot++) { if (streamer->request_work[slot] >= 0) { // Check whether all requests are finished int i; for (i = 0; i < facet_work_count; i++) if (*request_slot(streamer, slot, i) != MPI_REQUEST_NULL) break; if (i >= facet_work_count) break; else { waiting++; } } } // Found and task queue not full? if (streamer->subgrid_tasks < streamer->work_cfg->vis_task_queue_length) { if (slot < streamer->queue_length) break; assert(waiting > 0); } else { // We are waiting for tasks to finish - idle for a bit usleep(1000); } } // Alright, found a slot with all data to form a subgrid. double complex *data_slot = nmbf_slot(streamer, slot, 0); streamer->received_subgrids++; streamer->wait_time += get_time_ns() - start; // Do work on received data. If a subgrid appears twice in our // work list, spawn all of the matching work const int iwork = streamer->request_work[slot]; int iw; for (iw = iwork; iw < streamer->work_cfg->subgrid_max_work; iw++) if (work[iw].iu == work[iwork].iu && work[iw].iv == work[iwork].iv && work[iw].iw == work[iwork].iw) streamer_work(streamer, iw, data_slot); // Return the (now free) slot streamer->request_work[slot] = -1; return slot; } void *streamer_reader_thread(void *param) { struct streamer *streamer = (struct streamer *)param; struct work_config *wcfg = streamer->work_cfg; struct subgrid_work *work = wcfg->subgrid_work + streamer->subgrid_worker * wcfg->subgrid_max_work; const int facets = wcfg->facet_workers * wcfg->facet_max_work; int *waitsome_indices = (int *)malloc(sizeof(int) * facets * streamer->queue_length); // Walk through subgrid work packets int iwork = 0, iwork_r = streamer->queue_length; for (iwork = 0; iwork < streamer->work_cfg->subgrid_max_work; iwork++) { // Skip? if (!work[iwork].nbl || streamer->skip_receive[iwork]) continue; // Receive a subgrid. Does not have to be in order. 
int slot = streamer_receive_a_subgrid(streamer, work, waitsome_indices); // Set up slot for new data (if appropriate) while (iwork_r < wcfg->subgrid_max_work && !work[iwork_r].nbl) iwork_r++; if (iwork_r < wcfg->subgrid_max_work) { streamer_ireceive(streamer, iwork_r, slot); } iwork_r++; } free(waitsome_indices); return NULL; } void *streamer_writer_thread(void *param) { struct streamer_writer *writer = (struct streamer_writer *) param; struct work_config *wcfg = writer->work_cfg; struct vis_spec *const spec = &writer->work_cfg->spec; const int vis_data_size = sizeof(double complex) * spec->time_chunk * spec->freq_chunk; double complex *vis_data_h5 = (double complex *) alloca(vis_data_size); // Create HDF5 output file if we are meant to output any amount of // visibilities if (!wcfg->vis_path) return NULL; // Get filename to use char filename[512]; sprintf(filename, wcfg->vis_path, writer->index); // For some reason we need to protect creating the file with a // critical section, otherwise libhdf5 messes up. Note that // creating groups (below) is apparently fine to do in parallel. #pragma omp critical { // Open file and "vis" group if (wcfg->vis_check_existing) { printf("\nOpening %s... ", filename); writer->file = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT); writer->group = H5Gopen(writer->file, "vis", H5P_DEFAULT); } else { printf("\nCreating %s... ", filename); writer->file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); if (writer->file >= 0) writer->group = H5Gcreate(writer->file, "vis", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); // Create baseline groups if (writer->file >= 0 && writer->group >= 0) create_bl_groups(writer->group, wcfg, writer->subgrid_worker); } } if (writer->file < 0 || writer->group < 0) { fprintf(stderr, "Could not open visibility file %s!\n", filename); return NULL; } int time_chunk_count = spec->time_count / spec->time_chunk; int freq_chunk_count = spec->freq_count / spec->freq_chunk; int chunk_count = spec->cfg->ant_count * spec->cfg->ant_count * time_chunk_count * freq_chunk_count; bool *chunks_written = calloc(sizeof(bool), chunk_count); for(;;) { // Obtain "out" lock for writing out visibilities double start = get_time_ns(); struct streamer_chunk *chunk = writer->queue + writer->out_ptr; #ifndef __APPLE__ sem_wait(&chunk->out_lock); #else dispatch_semaphore_wait(chunk->out_lock, DISPATCH_TIME_FOREVER); #endif writer->wait_out_time += get_time_ns() - start; start = get_time_ns(); // Obtain baseline data if (chunk->tchunk == -1 && chunk->fchunk == -1) break; // Signal to end thread if (chunk->tchunk == -2 && chunk->fchunk == -2) { #pragma omp atomic writer->to_write -= 1; #ifndef __APPLE__ sem_post(&chunk->in_lock); #else dispatch_semaphore_signal(chunk->in_lock); #endif writer->out_ptr = (writer->out_ptr + 1) % writer->queue_length; continue; // Signal to ignore chunk } const int nant = wcfg->spec.cfg->ant_count; struct bl_data *bl_data = chunk->bl_data; double complex *vis_data = chunk->vis; // Read visibility chunk. If it was not yet set, this will // just fill the buffer with zeroes. 
int chunk_index = ((bl_data->antenna2 * nant + bl_data->antenna1) * time_chunk_count + chunk->tchunk) * freq_chunk_count + chunk->fchunk; if (wcfg->vis_check_existing || chunks_written[chunk_index]) { read_vis_chunk(writer->group, bl_data, spec->time_chunk, spec->freq_chunk, chunk->tchunk, chunk->fchunk, vis_data_h5); } else { memset(vis_data_h5, 0, vis_data_size); } writer->read_time += get_time_ns() - start; start = get_time_ns(); if (wcfg->vis_check_existing) { // Compare data int i; for (i = 0; i < spec->time_chunk * spec->freq_chunk; i++) { if (vis_data[i] != 0) { if (cabs(vis_data_h5[i] - vis_data[i]) > 1e-12) { printf("%g%+gj != %g%+gj (diff %g)!\n", creal(vis_data_h5[i]), cimag(vis_data_h5[i]), creal(vis_data[i]), cimag(vis_data[i]), cabs(vis_data_h5[i] - vis_data[i])); } } } writer->rewritten_vis_data += vis_data_size; } else { // Copy over data start = get_time_ns(); int i; for (i = 0; i < spec->time_chunk * spec->freq_chunk; i++) { if (vis_data[i] != 0) { // Make sure we never over-write data! assert(vis_data_h5[i] == 0); vis_data_h5[i] = vis_data[i]; } } // Write chunk back write_vis_chunk(writer->group, bl_data, spec->time_chunk, spec->freq_chunk, chunk->tchunk, chunk->fchunk, vis_data_h5); writer->written_vis_data += vis_data_size; if (chunks_written[chunk_index]) writer->rewritten_vis_data += vis_data_size; chunks_written[chunk_index] = true; } // Release "in" lock to mark the slot free for writing #pragma omp atomic writer->to_write -= 1; #ifndef __APPLE__ sem_post(&chunk->in_lock); #else dispatch_semaphore_signal(chunk->in_lock); #endif writer->out_ptr = (writer->out_ptr + 1) % writer->queue_length; writer->write_time += get_time_ns() - start; } H5Gclose(writer->group); H5Fclose(writer->file); return NULL; } static void _append_stat(char *stats, const char *stat_name, int worker, double val, double mult) { sprintf(stats + strlen(stats), "user.recombine.%s:%g|g|#streamer:%d\n", stat_name, (double)(val * mult), worker); } static void _append_writer_stat(char *stats, const char *stat_name, int worker, int writer, double val, double mult) { sprintf(stats + strlen(stats), "user.recombine.%s:%g|g|#streamer:%d,writer:%d\n", stat_name, (double)(val * mult), worker, writer); } void *streamer_publish_stats(void *par) { // Get streamer state. Make a copy to maintain differences struct streamer *streamer = (struct streamer *)par; struct streamer last, now; memcpy(&last, streamer, sizeof(struct streamer)); struct streamer_writer *writers_last, *writers_now; const size_t writers_size = sizeof(struct streamer_writer) * streamer->writer_count; writers_last = (struct streamer_writer *)malloc(writers_size); writers_now = (struct streamer_writer *)malloc(writers_size); memcpy(writers_last, streamer->writer, writers_size); last.writer = writers_last; double sample_rate = streamer->work_cfg->statsd_rate; double next_stats = get_time_ns() + sample_rate; while(!streamer->finished) { // Make a snapshot of streamer state memcpy(&now, streamer, sizeof(struct streamer)); memcpy(writers_now, streamer->writer, writers_size); now.writer = writers_now; // Add counters char stats[4096]; stats[0] = 0; #define PARS(stat) stats, #stat, streamer->subgrid_worker, now.stat - last.stat _append_stat(PARS(wait_time), 100. / sample_rate); _append_stat(PARS(wait_in_time), 100. / streamer->num_workers / sample_rate); _append_stat(PARS(task_start_time), 100 / sample_rate); _append_stat(PARS(recombine_time), 100 / sample_rate); _append_stat(PARS(degrid_time), 100. 
/ streamer->num_workers / sample_rate); _append_stat(PARS(received_data), 1 / sample_rate); _append_stat(PARS(received_subgrids), 1 / sample_rate); _append_stat(PARS(baselines_covered), 1 / sample_rate); _append_stat(PARS(degrid_flops), 1 / sample_rate); _append_stat(PARS(produced_chunks), 1 / sample_rate); _append_stat(PARS(task_yields), 1 / sample_rate); _append_stat(PARS(vis_error_samples), 1 / sample_rate); _append_stat(PARS(grid_error_samples), 1 / sample_rate); #undef PARS config_send_statsd(streamer->work_cfg, stats); stats[0] = 0; int i; for (i = 0; i < streamer->writer_count; i++) { #define PARS(stat) stats, #stat, streamer->subgrid_worker, now.writer[i].index, now.writer[i].stat - last.writer[i].stat _append_writer_stat(PARS(wait_out_time), 100 / sample_rate); _append_writer_stat(PARS(read_time), 100 / sample_rate); _append_writer_stat(PARS(write_time), 100 / sample_rate); _append_writer_stat(PARS(written_vis_data), 1 / sample_rate); _append_writer_stat(PARS(rewritten_vis_data), 1 / sample_rate); #undef PARS _append_writer_stat(stats, "chunks_to_write", streamer->subgrid_worker, now.writer[i].index, now.writer[i].to_write, 1); } // Receiver idle time double receiver_idle_time = 0; receiver_idle_time = sample_rate; receiver_idle_time -= now.wait_time - last.wait_time; receiver_idle_time -= now.task_start_time - last.task_start_time; receiver_idle_time -= now.recombine_time - last.recombine_time; _append_stat(stats, "receiver_idle_time", streamer->subgrid_worker, receiver_idle_time, 100 / sample_rate); // Worker idle time double worker_idle_time = 0; worker_idle_time = streamer->num_workers * sample_rate; worker_idle_time -= now.wait_in_time - last.wait_in_time; worker_idle_time -= now.degrid_time - last.degrid_time; _append_stat(stats, "worker_idle_time", streamer->subgrid_worker, worker_idle_time, 100 / sample_rate / streamer->num_workers); // Add gauges double derror_sum = now.vis_error_sum - last.vis_error_sum; uint64_t dsamples = now.vis_error_samples - last.vis_error_samples; if (dsamples > 0) { _append_stat(stats, "visibility_samples", streamer->subgrid_worker, dsamples, 1); _append_stat(stats, "visibility_rmse", streamer->subgrid_worker, 10 * log(sqrt(derror_sum / dsamples) / streamer->work_cfg->source_energy) / log(10), 1); } _append_stat(stats, "degrid_tasks", streamer->subgrid_worker, now.subgrid_tasks, 1); const int request_queue_length = streamer->work_cfg->facet_workers * streamer->work_cfg->facet_max_work * streamer->queue_length; int nrequests = 0; for (i = 0; i < request_queue_length; i++) if (now.request_queue[i] != MPI_REQUEST_NULL) nrequests++; _append_stat(stats, "waiting_requests", streamer->subgrid_worker, nrequests, 1); // Send to statsd server config_send_statsd(streamer->work_cfg, stats); // Copy from "now" to "last", to use as reference next time memcpy(&last, &now, sizeof(struct streamer)); memcpy(writers_last, writers_now, writers_size); last.writer = writers_last; // Determine when to next send stats, sleep while (next_stats <= get_time_ns()) { next_stats += sample_rate; } usleep((useconds_t) (1000000 * (next_stats - get_time_ns())) ); } return NULL; } bool streamer_init(struct streamer *streamer, struct work_config *wcfg, int subgrid_worker, int *producer_ranks) { struct recombine2d_config *cfg = &wcfg->recombine; const int facets = wcfg->facet_workers * wcfg->facet_max_work; streamer->work_cfg = wcfg; streamer->subgrid_worker = subgrid_worker; streamer->producer_ranks = producer_ranks; streamer->num_workers = omp_get_max_threads(); 
streamer->wait_time = streamer->wait_in_time = streamer->degrid_time = streamer->task_start_time = streamer->recombine_time = 0; streamer->received_data = 0; streamer->received_subgrids = streamer->baselines_covered = 0; streamer->vis_error_samples = 0; streamer->vis_error_sum = 0; streamer->vis_worst_error = 0; streamer->grid_error_samples = 0; streamer->grid_error_sum = 0; streamer->grid_worst_error = 0; streamer->degrid_flops = 0; streamer->produced_chunks = 0; streamer->task_yields = 0; streamer->subgrid_tasks = 0; streamer->finished = false; // Load gridding kernel if (wcfg->gridder.data) { streamer->kern = &wcfg->gridder; } // Calculate size of queues streamer->queue_length = wcfg->vis_subgrid_queue_length; streamer->vis_queue_length = wcfg->vis_chunk_queue_length; streamer->writer_count = (wcfg->vis_path ? wcfg->vis_writer_count : 0); hbool_t hdf5_threadsafe; H5is_library_threadsafe(&hdf5_threadsafe); if (!wcfg->vis_fork_writer && streamer->writer_count > 1 && !hdf5_threadsafe) { fprintf(stderr, "WARNING: libhdf5 is not thread safe, using only one writer thread!\n"); streamer->writer_count = 1; } if (streamer->writer_count > 0) { streamer->vis_queue_per_writer = streamer->vis_queue_length / streamer->writer_count; } else { streamer->vis_queue_per_writer = 0; } const int nmbf_length = cfg->NMBF_NMBF_size / sizeof(double complex); const size_t queue_size = (size_t)sizeof(double complex) * nmbf_length * facets * streamer->queue_length; const size_t sg_queue_size = (size_t)cfg->SG_size * streamer->queue_length; const size_t requests_size = (size_t)sizeof(MPI_Request) * facets * streamer->queue_length; struct vis_spec *const spec = &streamer->work_cfg->spec; const int vis_data_size = sizeof(double complex) * spec->time_chunk * spec->freq_chunk; printf("Allocating %.3g GB subgrid queue, %.3g GB visibility queue\n", (double)(queue_size+sg_queue_size+requests_size) / 1e9, (double)((size_t)streamer->vis_queue_length * (vis_data_size + 6 * sizeof(int))) / 1e9); // Allocate receive queue streamer->nmbf_queue = (double complex *)malloc(queue_size); streamer->request_queue = (MPI_Request *)malloc(requests_size); streamer->request_work = (int *)malloc(sizeof(int) * streamer->queue_length); streamer->subgrid_queue = (double complex *)malloc(sg_queue_size); streamer->subgrid_locks = (int *)calloc(sizeof(int), streamer->queue_length); streamer->skip_receive = (bool *)calloc(sizeof(bool), wcfg->subgrid_max_work); if (!streamer->nmbf_queue || !streamer->request_queue || !streamer->subgrid_queue || !streamer->subgrid_locks || !streamer->skip_receive) { fprintf(stderr, "ERROR: Could not allocate subgrid queue!\n"); return false; } // Populate receive queue int iwork; for (iwork = 0; iwork < wcfg->subgrid_max_work && iwork < streamer->queue_length; iwork++) { streamer_ireceive(streamer, iwork, iwork); } for (; iwork < streamer->queue_length; iwork++) { int i; for (i = 0; i < facets; i++) { *request_slot(streamer, iwork, i) = MPI_REQUEST_NULL; } streamer->request_work[iwork] = -1; } // Plan FFTs streamer->subgrid_plan = fftw_plan_dft_2d(cfg->xM_size, cfg->xM_size, streamer->subgrid_queue, streamer->subgrid_queue + cfg->SG_size, FFTW_BACKWARD, FFTW_MEASURE); // Allocate visibility queue streamer->vis_queue_size = (size_t)streamer->vis_queue_length * vis_data_size; streamer->vis_chunks_size = (size_t)streamer->vis_queue_length * sizeof(struct streamer_chunk); if (wcfg->vis_fork_writer) { streamer->vis_queue = mmap(NULL, streamer->vis_queue_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0); 
streamer->vis_chunks = mmap(NULL, streamer->vis_chunks_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0); } else { streamer->vis_queue = malloc((size_t)streamer->vis_queue_length * vis_data_size); streamer->vis_chunks = malloc((size_t)streamer->vis_queue_length * sizeof(struct streamer_chunk)); } if (!streamer->vis_queue || !streamer->vis_chunks) { fprintf(stderr, "ERROR: Could not allocate visibility queue!\n"); return false; } // Initialise writer thread data streamer->writer = NULL; streamer->writer_size = streamer->writer_count * sizeof(struct streamer_writer); if (streamer->writer_count > 0) { if (wcfg->vis_fork_writer) { printf("Using %d writer processes\n", streamer->writer_count); streamer->writer = mmap(NULL, streamer->writer_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0); } else { printf("Using %d writer threads\n", streamer->writer_count); streamer->writer = calloc(1, streamer->writer_size); } int i; for (i = 0; i < streamer->writer_count; i++) { struct streamer_writer *writer = streamer->writer + i; writer->subgrid_worker = subgrid_worker; writer->index = subgrid_worker * streamer->writer_count + i; writer->work_cfg = wcfg; writer->file = writer->group = -1; writer->queue_length = streamer->vis_queue_per_writer; writer->in_ptr = writer->out_ptr = writer->to_write = 0; writer->queue = streamer->vis_chunks + i * streamer->vis_queue_per_writer; int j; for (j = 0; j < streamer->vis_queue_per_writer; j++) { #ifndef __APPLE__ sem_init(&writer->queue[j].in_lock, wcfg->vis_fork_writer, 1); sem_init(&writer->queue[j].out_lock, wcfg->vis_fork_writer, 0); #else writer->queue[j].in_lock = dispatch_semaphore_create(1); writer->queue[j].out_lock = dispatch_semaphore_create(0); #endif writer->queue[j].vis = streamer->vis_queue + spec->time_chunk * spec->freq_chunk * (i * streamer->vis_queue_per_writer + j); } // Now either fork the writer or start a thread if (wcfg->vis_fork_writer) { writer->pid = fork(); if (!writer->pid) { streamer_writer_thread(writer); exit(0); } } else { pthread_create(&writer->thread, NULL, streamer_writer_thread, writer); } } } return true; } bool streamer_free(struct streamer *streamer, double stream_start) { bool success = true; // Wait for writer to actually finish int i; if (streamer->writer_count > 0) { printf("Finishing writes...\n"); for (i = 0; i < streamer->writer_count; i++) { struct streamer_writer *writer = streamer->writer + i; if (streamer->work_cfg->vis_fork_writer) { waitpid(writer->pid, NULL, 0); } else { pthread_join(writer->thread, NULL); } } } double stream_time = get_time_ns() - stream_start; printf("Streamed for %.2fs\n", stream_time); printf("Received %.2f GB (%"PRIu64" subgrids, %"PRIu64" baselines)\n", (double)streamer->received_data / 1000000000, streamer->received_subgrids, streamer->baselines_covered); printf("Receiver: Wait: %gs, Recombine: %gs, Idle: %gs\n", streamer->wait_time, streamer->recombine_time, stream_time - streamer->wait_time - streamer->recombine_time); printf("Worker: Wait: %gs, Degrid: %gs, Idle: %gs\n", streamer->wait_in_time, streamer->degrid_time, streamer->num_workers * stream_time - streamer->wait_in_time - streamer->degrid_time - streamer->wait_time - streamer->recombine_time); printf("Operations: degrid %.1f GFLOP/s (%"PRIu64" chunks)\n", (double)streamer->degrid_flops / stream_time / 1000000000, streamer->produced_chunks); if (streamer->vis_error_samples > 0) { // Calculate root mean square error const double grid_rmse = sqrt(streamer->grid_error_sum / streamer->grid_error_samples); 
const double vis_rmse = sqrt(streamer->vis_error_sum / streamer->vis_error_samples); // Normalise by assuming that the energy of sources is // distributed evenly to all grid points const double source_energy = streamer->work_cfg->source_energy; printf("Grid accuracy: RMSE %g, worst %g (%"PRIu64" samples)\n", grid_rmse / source_energy, streamer->grid_worst_error / source_energy, streamer->grid_error_samples); printf("Vis accuracy: RMSE %g, worst %g (%"PRIu64" samples)\n", vis_rmse / source_energy, streamer->vis_worst_error / source_energy, streamer->vis_error_samples); // Check against error bounds if (fmax(streamer->grid_worst_error, streamer->vis_worst_error) > streamer->work_cfg->vis_max_error * source_energy) { printf("ERROR: Accuracy worse than RMSE threshold of %g!\n", streamer->work_cfg->vis_max_error); success = false; } } for (i = 0; i < streamer->writer_count; i++) { struct streamer_writer *writer = streamer->writer + i; int j; for (j = 0; j < streamer->vis_queue_per_writer; j++) { #ifndef __APPLE__ sem_destroy(&writer->queue[j].in_lock); sem_destroy(&writer->queue[j].out_lock); #else dispatch_release(writer->queue[i].in_lock); dispatch_release(writer->queue[i].out_lock); #endif } // Then print stats. The above join can hang a bit as data // gets flushed, so re-determine stream time. printf("Writer %d: %.2f GB (rewritten %.2f GB), rate %.2f GB/s (%.2f GB/s effective)\n", writer->index, (double)writer->written_vis_data / 1000000000, (double)writer->rewritten_vis_data / 1000000000, (double)writer->written_vis_data / 1000000000 / stream_time, (double)(writer->written_vis_data - writer->rewritten_vis_data) / 1000000000 / stream_time); printf("Writer %d: Wait: %gs, Read: %gs, Write: %gs, Idle: %gs\n", writer->index, writer->wait_out_time, writer->read_time, writer->write_time, stream_time - writer->wait_out_time - writer->read_time - writer->write_time); } free(streamer->nmbf_queue); free(streamer->subgrid_queue); free(streamer->request_queue); free(streamer->subgrid_locks); free(streamer->skip_receive); fftw_free(streamer->subgrid_plan); if (streamer->work_cfg->vis_fork_writer) { munmap(streamer->vis_queue, streamer->vis_queue_size); munmap(streamer->vis_chunks, streamer->vis_chunks_size); if (streamer->writer) { munmap(streamer->writer, streamer->writer_size); } } else { free(streamer->vis_queue); free(streamer->vis_chunks); free(streamer->writer); } return success; } int streamer(struct work_config *wcfg, int subgrid_worker, int *producer_ranks) { struct streamer streamer; if (!streamer_init(&streamer, wcfg, subgrid_worker, producer_ranks)) { return 1; } double stream_start = get_time_ns(); // Add reader, writer and statistic threads to OpenMP // threads. Those will idle most of the time, and therefore should // not count towards worker limit. int num_threads = streamer.num_workers; num_threads++; // reader thread if (wcfg->statsd_socket >= 0) { num_threads++; // statistics thread } omp_set_num_threads(num_threads); printf("Waiting for data (%d threads)...\n", num_threads); // Start doing work. Note that all the actual work will be added // by the reader thread, which will generate OpenMP tasks to be // executed by the following parallel section. All we're doing // here is yielding to them and shutting everything down once all // work has been completed. 
#pragma omp parallel sections { #pragma omp section { streamer_reader_thread(&streamer); // Wait for tasks to finish (could do a taskwait, but that // might mess up the thread balance) while (streamer.subgrid_tasks > 0) { usleep(10000); } #pragma omp taskwait // Just to be sure // All work is done - signal writer tasks to exit streamer.finished = true; int writer = 0; for (writer = 0; writer < streamer.writer_count; writer++) { struct streamer_chunk *slot = writer_push_slot( streamer.writer + writer, NULL, -1, -1); #ifndef __APPLE__ sem_post(&slot->out_lock); #else dispatch_semaphore_signal(slot->out_lock); #endif } } #pragma omp section { if (wcfg->statsd_socket >= 0) { streamer_publish_stats(&streamer); } } } // #pragma omp parallel sections // Finalise streamer. Check for successful completion. if (streamer_free(&streamer, stream_start)) { return 0; } else { return 1; } }
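
The visibility writer queue above hands chunks from producer tasks to a writer thread through a circular buffer in which every slot carries a pair of semaphores: the producer waits on in_lock before filling a slot and posts out_lock, the writer waits on out_lock and posts in_lock once the slot may be reused. A minimal sketch of that per-slot handshake, assuming POSIX semaphores only (the __APPLE__ dispatch variant is omitted) and illustrative names throughout:

#include <semaphore.h>
#include <pthread.h>
#include <stdio.h>

#define QUEUE_LEN 4

struct slot { sem_t in_lock, out_lock; int payload; };
static struct slot queue[QUEUE_LEN];

static void *consumer(void *arg)
{
    (void)arg;
    for (int out = 0, done = 0; !done; out = (out + 1) % QUEUE_LEN) {
        sem_wait(&queue[out].out_lock);           /* wait for a filled slot */
        if (queue[out].payload < 0) done = 1;     /* termination signal     */
        else printf("wrote chunk %d\n", queue[out].payload);
        sem_post(&queue[out].in_lock);            /* slot is free again     */
    }
    return NULL;
}

int main(void)
{
    for (int i = 0; i < QUEUE_LEN; i++) {
        sem_init(&queue[i].in_lock, 0, 1);        /* slots start empty      */
        sem_init(&queue[i].out_lock, 0, 0);
    }
    pthread_t tid;
    pthread_create(&tid, NULL, consumer, NULL);

    int in = 0;
    for (int chunk = 0; chunk < 10; chunk++, in = (in + 1) % QUEUE_LEN) {
        sem_wait(&queue[in].in_lock);             /* claim an empty slot    */
        queue[in].payload = chunk;
        sem_post(&queue[in].out_lock);            /* hand it to the writer  */
    }
    sem_wait(&queue[in].in_lock);                 /* send termination       */
    queue[in].payload = -1;
    sem_post(&queue[in].out_lock);

    pthread_join(tid, NULL);
    for (int i = 0; i < QUEUE_LEN; i++) {
        sem_destroy(&queue[i].in_lock);
        sem_destroy(&queue[i].out_lock);
    }
    return 0;
}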
WENOFifthOrderCalculateWeights.c
/*! @file WENOFifthOrderCalculateWeights.c @brief Functions to compute the nonlinear weights for WENO-type schemes @author Debojyoti Ghosh */ #include <stdlib.h> #include <string.h> #include <basic.h> #include <arrayfunctions.h> #include <mathfunctions.h> #include <interpolation.h> #include <mpivars.h> #include <hypar.h> #undef _MINIMUM_GHOSTS_ /*! \def _MINIMUM_GHOSTS_ * Minimum number of ghost points required for this interpolation * method. */ #define _MINIMUM_GHOSTS_ 3 static int WENOFifthOrderCalculateWeightsJS(double*,double*,double*,int,void*,void*); static int WENOFifthOrderCalculateWeightsM (double*,double*,double*,int,void*,void*); static int WENOFifthOrderCalculateWeightsZ (double*,double*,double*,int,void*,void*); static int WENOFifthOrderCalculateWeightsYC(double*,double*,double*,int,void*,void*); static int WENOFifthOrderCalculateWeightsCharJS(double*,double*,double*,int,void*,void*); static int WENOFifthOrderCalculateWeightsCharM (double*,double*,double*,int,void*,void*); static int WENOFifthOrderCalculateWeightsCharZ (double*,double*,double*,int,void*,void*); static int WENOFifthOrderCalculateWeightsCharYC(double*,double*,double*,int,void*,void*); /*! Compute the nonlinear weights for 5th order WENO-type schemes. This function is a wrapper that calls the appropriate function, depending on the type of WENO weights. */ int WENOFifthOrderCalculateWeights( double *fC, /*!< Array of cell-centered values of the function \f${\bf f}\left({\bf u}\right)\f$ */ double *uC, /*!< Array of cell-centered values of the solution \f${\bf u}\f$ */ double *x, /*!< Grid coordinates */ int dir, /*!< Spatial dimension along which to interpolation */ void *s, /*!< Object of type #HyPar containing solver-related variables */ void *m /*!< Object of type #MPIVariables containing MPI-related variables */ ) { HyPar *solver = (HyPar*) s; WENOParameters *weno = (WENOParameters*) solver->interp; MPIVariables *mpi = (MPIVariables*) m; if (weno->yc) return(WENOFifthOrderCalculateWeightsYC (fC,uC,x,dir,solver,mpi)); else if (weno->borges) return(WENOFifthOrderCalculateWeightsZ (fC,uC,x,dir,solver,mpi)); else if (weno->mapped) return(WENOFifthOrderCalculateWeightsM (fC,uC,x,dir,solver,mpi)); else return(WENOFifthOrderCalculateWeightsJS (fC,uC,x,dir,solver,mpi)); } /*! Compute the nonlinear weights for 5th order WENO-type schemes. This function is a wrapper that calls the appropriate function, depending on the type of WENO weights. */ int WENOFifthOrderCalculateWeightsChar( double *fC, /*!< Array of cell-centered values of the function \f${\bf f}\left({\bf u}\right)\f$ */ double *uC, /*!< Array of cell-centered values of the solution \f${\bf u}\f$ */ double *x, /*!< Grid coordinates */ int dir, /*!< Spatial dimension along which to interpolation */ void *s, /*!< Object of type #HyPar containing solver-related variables */ void *m /*!< Object of type #MPIVariables containing MPI-related variables */ ) { HyPar *solver = (HyPar*) s; WENOParameters *weno = (WENOParameters*) solver->interp; MPIVariables *mpi = (MPIVariables*) m; if (weno->yc) return(WENOFifthOrderCalculateWeightsCharYC (fC,uC,x,dir,solver,mpi)); else if (weno->borges) return(WENOFifthOrderCalculateWeightsCharZ (fC,uC,x,dir,solver,mpi)); else if (weno->mapped) return(WENOFifthOrderCalculateWeightsCharM (fC,uC,x,dir,solver,mpi)); else return(WENOFifthOrderCalculateWeightsCharJS (fC,uC,x,dir,solver,mpi)); } /*! 
Computes the nonlinear weights for the 5th order component-wise WENO-type schemes using the original formulation of Jiang & Shu: \f{equation}{ \omega_k = \frac {a_k} {\sum_{j=1}^3 a_j },\ a_k = \frac {c_k} {\left(\beta_k+\epsilon\right)^p},\ k = 1,2,3, \f} where \f$c_k\f$ are the optimal weights, \f$p\f$ is hardcoded to \f$2\f$, and \f$\epsilon\f$ is an input parameter (#WENOParameters::eps) (typically \f$10^{-6}\f$). The smoothness indicators \f$\beta_k\f$ are given by: \f{eqnarray}{ \beta_1 &=& \frac{13}{12} \left(f_{j-2}-2f_{j-1}+f_j\right)^2 + \frac{1}{4}\left(f_{j-2}-4f_{j-1}+3f_j\right)^2 \\ \beta_2 &=& \frac{13}{12} \left(f_{j-1}-2f_j+f_{j+1}\right)^2 + \frac{1}{4}\left(f_{j-1}-f_{j+1}\right)^2 \\ \beta_3 &=& \frac{13}{12} \left(f_j-2f_{j+1}+f_{j+2}\right)^2 + \frac{1}{4}\left(3f_j-4f_{j+1}+f_{j+2}\right)^2 \f} \b Notes: + This function computes the weights for the entire grid (for interpolation along a given spatial dimension \a dir ). Thus, it loops over all grid lines along \a dir. + The weights are computed for all components, for both the left- and right-biased interpolations, and for both the flux function \f${\bf f}\left({\bf u}\right)\f$ and the solution \f$\bf u\f$. Thus, this approach of computing and storing the weights is quite memory-intensive. + The variable \a offset denotes the offset in the complete array from where the weights for interpolation along the current interpolation dimension (\a dir ) are stored. \b Reference: + Jiang, Shu, Efficient Implementation of Weighted ENO Schemes, J. Comput. Phys., 1996. http://dx.doi.org/10.1006/jcph.1996.0130 */ int WENOFifthOrderCalculateWeightsJS( double *fC, /*!< Array of cell-centered values of the function \f${\bf f}\left({\bf u}\right)\f$ */ double *uC, /*!< Array of cell-centered values of the solution \f${\bf u}\f$ */ double *x, /*!< Grid coordinates */ int dir, /*!< Spatial dimension along which to interpolation */ void *s, /*!< Object of type #HyPar containing solver-related variables */ void *m /*!< Object of type #MPIVariables containing MPI-related variables */ ) { HyPar *solver = (HyPar*) s; WENOParameters *weno = (WENOParameters*) solver->interp; MPIVariables *mpi = (MPIVariables*) m; int i; double *ww1LF, *ww2LF, *ww3LF, *ww1RF, *ww2RF, *ww3RF; double *ww1LU, *ww2LU, *ww3LU, *ww1RU, *ww2RU, *ww3RU; int ghosts = solver->ghosts; int ndims = solver->ndims; int nvars = solver->nvars; int *dim = solver->dim_local; int *stride= solver->stride_with_ghosts; /* define some constants */ static const double thirteen_by_twelve = 13.0/12.0; static const double one_fourth = 1.0/4.0; /* calculate dimension offset */ int offset = weno->offset[dir]; /* create index and bounds for the outer loop, i.e., to loop over all 1D lines along dimension "dir" */ int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims]; _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1; _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1; int N_outer; _ArrayProduct1D_(bounds_outer,ndims,N_outer); ww1LF = weno->w1 + offset; ww2LF = weno->w2 + offset; ww3LF = weno->w3 + offset; ww1RF = weno->w1 + 2*weno->size + offset; ww2RF = weno->w2 + 2*weno->size + offset; ww3RF = weno->w3 + 2*weno->size + offset; ww1LU = weno->w1 + weno->size + offset; ww2LU = weno->w2 + weno->size + offset; ww3LU = weno->w3 + weno->size + offset; ww1RU = weno->w1 + 2*weno->size + weno->size + offset; ww2RU = weno->w2 + 2*weno->size + weno->size + offset; ww3RU = weno->w3 + 2*weno->size + weno->size + offset; #pragma omp 
parallel for schedule(auto) default(shared) private(i,index_outer,indexC,indexI) for (i=0; i<N_outer; i++) { _ArrayIndexnD_(ndims,i,bounds_outer,index_outer,0); _ArrayCopy1D_(index_outer,indexC,ndims); _ArrayCopy1D_(index_outer,indexI,ndims); for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) { int qm1L,qm2L,qm3L,qp1L,qp2L,p,qm1R,qm2R,qm3R,qp1R,qp2R; _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p); indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1L); qm3L = qm1L - 2*stride[dir]; qm2L = qm1L - stride[dir]; qp1L = qm1L + stride[dir]; qp2L = qm1L + 2*stride[dir]; indexC[dir] = indexI[dir] ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1R); qm3R = qm1R + 2*stride[dir]; qm2R = qm1R + stride[dir]; qp1R = qm1R - stride[dir]; qp2R = qm1R - 2*stride[dir]; /* Defining stencil points */ double *m3LF, *m2LF, *m1LF, *p1LF, *p2LF; m3LF = (fC+qm3L*nvars); m2LF = (fC+qm2L*nvars); m1LF = (fC+qm1L*nvars); p1LF = (fC+qp1L*nvars); p2LF = (fC+qp2L*nvars); double *m3RF, *m2RF, *m1RF, *p1RF, *p2RF; m3RF = (fC+qm3R*nvars); m2RF = (fC+qm2R*nvars); m1RF = (fC+qm1R*nvars); p1RF = (fC+qp1R*nvars); p2RF = (fC+qp2R*nvars); double *m3LU, *m2LU, *m1LU, *p1LU, *p2LU; m3LU = (uC+qm3L*nvars); m2LU = (uC+qm2L*nvars); m1LU = (uC+qm1L*nvars); p1LU = (uC+qp1L*nvars); p2LU = (uC+qp2L*nvars); double *m3RU, *m2RU, *m1RU, *p1RU, *p2RU; m3RU = (uC+qm3R*nvars); m2RU = (uC+qm2R*nvars); m1RU = (uC+qm1R*nvars); p1RU = (uC+qp1R*nvars); p2RU = (uC+qp2R*nvars); /* optimal weights*/ double c1, c2, c3; if (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_CRWENO_)) { if ( ((mpi->ip[dir] == 0 ) && (indexI[dir] == 0 )) || ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) { /* Use WENO5 at the physical boundaries */ c1 = _WENO_OPTIMAL_WEIGHT_1_; c2 = _WENO_OPTIMAL_WEIGHT_2_; c3 = _WENO_OPTIMAL_WEIGHT_3_; } else { /* CRWENO5 at the interior points */ c1 = _CRWENO_OPTIMAL_WEIGHT_1_; c2 = _CRWENO_OPTIMAL_WEIGHT_2_; c3 = _CRWENO_OPTIMAL_WEIGHT_3_; } } else { /* WENO5 and HCWENO5 */ c1 = _WENO_OPTIMAL_WEIGHT_1_; c2 = _WENO_OPTIMAL_WEIGHT_2_; c3 = _WENO_OPTIMAL_WEIGHT_3_; } /* calculate WENO weights */ _WENOWeights_v_JS_((ww1LF+p*nvars),(ww2LF+p*nvars),(ww3LF+p*nvars),c1,c2,c3,m3LF,m2LF,m1LF,p1LF,p2LF,weno,nvars); _WENOWeights_v_JS_((ww1RF+p*nvars),(ww2RF+p*nvars),(ww3RF+p*nvars),c1,c2,c3,m3RF,m2RF,m1RF,p1RF,p2RF,weno,nvars); _WENOWeights_v_JS_((ww1LU+p*nvars),(ww2LU+p*nvars),(ww3LU+p*nvars),c1,c2,c3,m3LU,m2LU,m1LU,p1LU,p2LU,weno,nvars); _WENOWeights_v_JS_((ww1RU+p*nvars),(ww2RU+p*nvars),(ww3RU+p*nvars),c1,c2,c3,m3RU,m2RU,m1RU,p1RU,p2RU,weno,nvars); } } return(0); } /*! Computes the nonlinear weights for the 5th order component-wise WENO-type schemes using the mapped formulation of Henrick, Aslam & Powers: \f{eqnarray}{ \omega_k &=& \frac {a_k} {\sum_{j=1}^3 a_j },\ a_k = \frac {\tilde{\omega}_k \left( c_k + c_k^2 - 3c_k\tilde{\omega}_k + \tilde{\omega}_k^2\right)} {c_k^2 + \tilde{\omega}_k\left(1-2c_k\right)}, \\ \tilde{\omega}_k &=& \frac {\tilde{a}_k} {\sum_{j=1}^3 \tilde{a}_j },\ \tilde{a}_k = \frac {c_k} {\left(\beta_k+\epsilon\right)^p},\ k = 1,2,3, \f} where \f$c_k\f$ are the optimal weights, \f$p\f$ is hardcoded to \f$2\f$, and \f$\epsilon\f$ is an input parameter (#WENOParameters::eps) (typically \f$10^{-6}\f$). 
The smoothness indicators \f$\beta_k\f$ are given by: \f{eqnarray}{ \beta_1 &=& \frac{13}{12} \left(f_{j-2}-2f_{j-1}+f_j\right)^2 + \frac{1}{4}\left(f_{j-2}-4f_{j-1}+3f_j\right)^2 \\ \beta_2 &=& \frac{13}{12} \left(f_{j-1}-2f_j+f_{j+1}\right)^2 + \frac{1}{4}\left(f_{j-1}-f_{j+1}\right)^2 \\ \beta_3 &=& \frac{13}{12} \left(f_j-2f_{j+1}+f_{j+2}\right)^2 + \frac{1}{4}\left(3f_j-4f_{j+1}+f_{j+2}\right)^2 \f} \b Notes: + This function computes the weights for the entire grid (for interpolation along a given spatial dimension \a dir ). Thus, it loops over all grid lines along \a dir. + The weights are computed for all components, for both the left- and right-biased interpolations, and for both the flux function \f${\bf f}\left({\bf u}\right)\f$ and the solution \f$\bf u\f$. Thus, this approach of computing and storing the weights is quite memory-intensive. + The variable \a offset denotes the offset in the complete array from where the weights for interpolation along the current interpolation dimension (\a dir ) are stored. \b Reference: + Henrick, Aslam, Powers, Mapped weighted essentially non-oscillatory schemes: Achieving optimal order near critical points, J. Comput. Phys., 2005. http://dx.doi.org/10.1016/j.jcp.2005.01.023 */ int WENOFifthOrderCalculateWeightsM( double *fC, /*!< Array of cell-centered values of the function \f${\bf f}\left({\bf u}\right)\f$ */ double *uC, /*!< Array of cell-centered values of the solution \f${\bf u}\f$ */ double *x, /*!< Grid coordinates */ int dir, /*!< Spatial dimension along which to interpolation */ void *s, /*!< Object of type #HyPar containing solver-related variables */ void *m /*!< Object of type #MPIVariables containing MPI-related variables */ ) { HyPar *solver = (HyPar*) s; WENOParameters *weno = (WENOParameters*) solver->interp; MPIVariables *mpi = (MPIVariables*) m; int i; double *ww1LF, *ww2LF, *ww3LF, *ww1RF, *ww2RF, *ww3RF; double *ww1LU, *ww2LU, *ww3LU, *ww1RU, *ww2RU, *ww3RU; int ghosts = solver->ghosts; int ndims = solver->ndims; int nvars = solver->nvars; int *dim = solver->dim_local; int *stride= solver->stride_with_ghosts; /* define some constants */ static const double thirteen_by_twelve = 13.0/12.0; static const double one_fourth = 1.0/4.0; /* calculate dimension offset */ int offset = weno->offset[dir]; /* create index and bounds for the outer loop, i.e., to loop over all 1D lines along dimension "dir" */ int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims]; _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1; _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1; int N_outer; _ArrayProduct1D_(bounds_outer,ndims,N_outer); ww1LF = weno->w1 + offset; ww2LF = weno->w2 + offset; ww3LF = weno->w3 + offset; ww1RF = weno->w1 + 2*weno->size + offset; ww2RF = weno->w2 + 2*weno->size + offset; ww3RF = weno->w3 + 2*weno->size + offset; ww1LU = weno->w1 + weno->size + offset; ww2LU = weno->w2 + weno->size + offset; ww3LU = weno->w3 + weno->size + offset; ww1RU = weno->w1 + 2*weno->size + weno->size + offset; ww2RU = weno->w2 + 2*weno->size + weno->size + offset; ww3RU = weno->w3 + 2*weno->size + weno->size + offset; #pragma omp parallel for schedule(auto) default(shared) private(i,index_outer,indexC,indexI) for (i=0; i<N_outer; i++) { _ArrayIndexnD_(ndims,i,bounds_outer,index_outer,0); _ArrayCopy1D_(index_outer,indexC,ndims); _ArrayCopy1D_(index_outer,indexI,ndims); for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) { int qm1L,qm2L,qm3L,qp1L,qp2L,p,qm1R,qm2R,qm3R,qp1R,qp2R; 
_ArrayIndex1D_(ndims,bounds_inter,indexI,0,p); indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1L); qm3L = qm1L - 2*stride[dir]; qm2L = qm1L - stride[dir]; qp1L = qm1L + stride[dir]; qp2L = qm1L + 2*stride[dir]; indexC[dir] = indexI[dir] ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1R); qm3R = qm1R + 2*stride[dir]; qm2R = qm1R + stride[dir]; qp1R = qm1R - stride[dir]; qp2R = qm1R - 2*stride[dir]; /* Defining stencil points */ double *m3LF, *m2LF, *m1LF, *p1LF, *p2LF; m3LF = (fC+qm3L*nvars); m2LF = (fC+qm2L*nvars); m1LF = (fC+qm1L*nvars); p1LF = (fC+qp1L*nvars); p2LF = (fC+qp2L*nvars); double *m3RF, *m2RF, *m1RF, *p1RF, *p2RF; m3RF = (fC+qm3R*nvars); m2RF = (fC+qm2R*nvars); m1RF = (fC+qm1R*nvars); p1RF = (fC+qp1R*nvars); p2RF = (fC+qp2R*nvars); double *m3LU, *m2LU, *m1LU, *p1LU, *p2LU; m3LU = (uC+qm3L*nvars); m2LU = (uC+qm2L*nvars); m1LU = (uC+qm1L*nvars); p1LU = (uC+qp1L*nvars); p2LU = (uC+qp2L*nvars); double *m3RU, *m2RU, *m1RU, *p1RU, *p2RU; m3RU = (uC+qm3R*nvars); m2RU = (uC+qm2R*nvars); m1RU = (uC+qm1R*nvars); p1RU = (uC+qp1R*nvars); p2RU = (uC+qp2R*nvars); /* optimal weights*/ double c1, c2, c3; if (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_CRWENO_)) { if ( ((mpi->ip[dir] == 0 ) && (indexI[dir] == 0 )) || ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) { /* Use WENO5 at the physical boundaries */ c1 = _WENO_OPTIMAL_WEIGHT_1_; c2 = _WENO_OPTIMAL_WEIGHT_2_; c3 = _WENO_OPTIMAL_WEIGHT_3_; } else { /* CRWENO5 at the interior points */ c1 = _CRWENO_OPTIMAL_WEIGHT_1_; c2 = _CRWENO_OPTIMAL_WEIGHT_2_; c3 = _CRWENO_OPTIMAL_WEIGHT_3_; } } else { /* WENO5 and HCWENO5 */ c1 = _WENO_OPTIMAL_WEIGHT_1_; c2 = _WENO_OPTIMAL_WEIGHT_2_; c3 = _WENO_OPTIMAL_WEIGHT_3_; } /* calculate WENO weights */ _WENOWeights_v_M_((ww1LF+p*nvars),(ww2LF+p*nvars),(ww3LF+p*nvars),c1,c2,c3,m3LF,m2LF,m1LF,p1LF,p2LF,weno,nvars); _WENOWeights_v_M_((ww1RF+p*nvars),(ww2RF+p*nvars),(ww3RF+p*nvars),c1,c2,c3,m3RF,m2RF,m1RF,p1RF,p2RF,weno,nvars); _WENOWeights_v_M_((ww1LU+p*nvars),(ww2LU+p*nvars),(ww3LU+p*nvars),c1,c2,c3,m3LU,m2LU,m1LU,p1LU,p2LU,weno,nvars); _WENOWeights_v_M_((ww1RU+p*nvars),(ww2RU+p*nvars),(ww3RU+p*nvars),c1,c2,c3,m3RU,m2RU,m1RU,p1RU,p2RU,weno,nvars); } } return(0); } /*! Computes the nonlinear weights for the 5th order component-wise WENO-type schemes using the WENO-Z formulation: \f{equation}{ \omega_k = \frac {a_k} {\sum_{j=1}^3 a_j },\ a_k = c_k \left( 1 + \frac{\tau_5}{\beta_k+\epsilon} \right)^p,\ k = 1,2,3, \f} where \f$c_k\f$ are the optimal weights, \f$p\f$ is hardcoded to \f$2\f$, and \f$\epsilon\f$ is an input parameter (#WENOParameters::eps) (typically \f$10^{-6}\f$). The smoothness indicators \f$\beta_k\f$ are given by: \f{eqnarray}{ \beta_1 &=& \frac{13}{12} \left(f_{j-2}-2f_{j-1}+f_j\right)^2 + \frac{1}{4}\left(f_{j-2}-4f_{j-1}+3f_j\right)^2 \\ \beta_2 &=& \frac{13}{12} \left(f_{j-1}-2f_j+f_{j+1}\right)^2 + \frac{1}{4}\left(f_{j-1}-f_{j+1}\right)^2 \\ \beta_3 &=& \frac{13}{12} \left(f_j-2f_{j+1}+f_{j+2}\right)^2 + \frac{1}{4}\left(3f_j-4f_{j+1}+f_{j+2}\right)^2, \f} and \f$\tau_5 = \left|\beta_1 - \beta_3 \right|\f$. \b Notes: + This function computes the weights for the entire grid (for interpolation along a given spatial dimension \a dir ). Thus, it loops over all grid lines along \a dir. + The weights are computed for all components, for both the left- and right-biased interpolations, and for both the flux function \f${\bf f}\left({\bf u}\right)\f$ and the solution \f$\bf u\f$. 
Thus, this approach of computing and storing the weights is quite memory-intensive. + The variable \a offset denotes the offset in the complete array from where the weights for interpolation along the current interpolation dimension (\a dir ) are stored. \b Reference: + Borges, et. al., An improved weighted essentially non-oscillatory scheme for hyperbolic conservation laws, J. Comput. Phys., 2008. http://dx.doi.org/10.1016/j.jcp.2007.11.038 + Castro, M., Costa, B., Don, W. S., High order weighted essentially non-oscillatory WENO-Z schemes for hyperbolic conservation laws, J. Comput. Phys., 2011. http://dx.doi.org/10.1016/j.jcp.2010.11.028 */ int WENOFifthOrderCalculateWeightsZ( double *fC, /*!< Array of cell-centered values of the function \f${\bf f}\left({\bf u}\right)\f$ */ double *uC, /*!< Array of cell-centered values of the solution \f${\bf u}\f$ */ double *x, /*!< Grid coordinates */ int dir, /*!< Spatial dimension along which to interpolation */ void *s, /*!< Object of type #HyPar containing solver-related variables */ void *m /*!< Object of type #MPIVariables containing MPI-related variables */ ) { HyPar *solver = (HyPar*) s; WENOParameters *weno = (WENOParameters*) solver->interp; MPIVariables *mpi = (MPIVariables*) m; int i; double *ww1LF, *ww2LF, *ww3LF, *ww1RF, *ww2RF, *ww3RF; double *ww1LU, *ww2LU, *ww3LU, *ww1RU, *ww2RU, *ww3RU; int ghosts = solver->ghosts; int ndims = solver->ndims; int nvars = solver->nvars; int *dim = solver->dim_local; int *stride= solver->stride_with_ghosts; /* define some constants */ static const double thirteen_by_twelve = 13.0/12.0; static const double one_fourth = 1.0/4.0; /* calculate dimension offset */ int offset = weno->offset[dir]; /* create index and bounds for the outer loop, i.e., to loop over all 1D lines along dimension "dir" */ int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims]; _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1; _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1; int N_outer; _ArrayProduct1D_(bounds_outer,ndims,N_outer); ww1LF = weno->w1 + offset; ww2LF = weno->w2 + offset; ww3LF = weno->w3 + offset; ww1RF = weno->w1 + 2*weno->size + offset; ww2RF = weno->w2 + 2*weno->size + offset; ww3RF = weno->w3 + 2*weno->size + offset; ww1LU = weno->w1 + weno->size + offset; ww2LU = weno->w2 + weno->size + offset; ww3LU = weno->w3 + weno->size + offset; ww1RU = weno->w1 + 2*weno->size + weno->size + offset; ww2RU = weno->w2 + 2*weno->size + weno->size + offset; ww3RU = weno->w3 + 2*weno->size + weno->size + offset; #pragma omp parallel for schedule(auto) default(shared) private(i,index_outer,indexC,indexI) for (i=0; i<N_outer; i++) { _ArrayIndexnD_(ndims,i,bounds_outer,index_outer,0); _ArrayCopy1D_(index_outer,indexC,ndims); _ArrayCopy1D_(index_outer,indexI,ndims); for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) { int qm1L,qm2L,qm3L,qp1L,qp2L,p,qm1R,qm2R,qm3R,qp1R,qp2R; _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p); indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1L); qm3L = qm1L - 2*stride[dir]; qm2L = qm1L - stride[dir]; qp1L = qm1L + stride[dir]; qp2L = qm1L + 2*stride[dir]; indexC[dir] = indexI[dir] ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1R); qm3R = qm1R + 2*stride[dir]; qm2R = qm1R + stride[dir]; qp1R = qm1R - stride[dir]; qp2R = qm1R - 2*stride[dir]; /* Defining stencil points */ double *m3LF, *m2LF, *m1LF, *p1LF, *p2LF; m3LF = (fC+qm3L*nvars); m2LF = (fC+qm2L*nvars); m1LF = (fC+qm1L*nvars); p1LF = (fC+qp1L*nvars); p2LF = 
(fC+qp2L*nvars); double *m3RF, *m2RF, *m1RF, *p1RF, *p2RF; m3RF = (fC+qm3R*nvars); m2RF = (fC+qm2R*nvars); m1RF = (fC+qm1R*nvars); p1RF = (fC+qp1R*nvars); p2RF = (fC+qp2R*nvars); double *m3LU, *m2LU, *m1LU, *p1LU, *p2LU; m3LU = (uC+qm3L*nvars); m2LU = (uC+qm2L*nvars); m1LU = (uC+qm1L*nvars); p1LU = (uC+qp1L*nvars); p2LU = (uC+qp2L*nvars); double *m3RU, *m2RU, *m1RU, *p1RU, *p2RU; m3RU = (uC+qm3R*nvars); m2RU = (uC+qm2R*nvars); m1RU = (uC+qm1R*nvars); p1RU = (uC+qp1R*nvars); p2RU = (uC+qp2R*nvars); /* optimal weights*/ double c1, c2, c3; if (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_CRWENO_)) { if ( ((mpi->ip[dir] == 0 ) && (indexI[dir] == 0 )) || ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) { /* Use WENO5 at the physical boundaries */ c1 = _WENO_OPTIMAL_WEIGHT_1_; c2 = _WENO_OPTIMAL_WEIGHT_2_; c3 = _WENO_OPTIMAL_WEIGHT_3_; } else { /* CRWENO5 at the interior points */ c1 = _CRWENO_OPTIMAL_WEIGHT_1_; c2 = _CRWENO_OPTIMAL_WEIGHT_2_; c3 = _CRWENO_OPTIMAL_WEIGHT_3_; } } else { /* WENO5 and HCWENO5 */ c1 = _WENO_OPTIMAL_WEIGHT_1_; c2 = _WENO_OPTIMAL_WEIGHT_2_; c3 = _WENO_OPTIMAL_WEIGHT_3_; } /* calculate WENO weights */ _WENOWeights_v_Z_((ww1LF+p*nvars),(ww2LF+p*nvars),(ww3LF+p*nvars),c1,c2,c3,m3LF,m2LF,m1LF,p1LF,p2LF,weno,nvars); _WENOWeights_v_Z_((ww1RF+p*nvars),(ww2RF+p*nvars),(ww3RF+p*nvars),c1,c2,c3,m3RF,m2RF,m1RF,p1RF,p2RF,weno,nvars); _WENOWeights_v_Z_((ww1LU+p*nvars),(ww2LU+p*nvars),(ww3LU+p*nvars),c1,c2,c3,m3LU,m2LU,m1LU,p1LU,p2LU,weno,nvars); _WENOWeights_v_Z_((ww1RU+p*nvars),(ww2RU+p*nvars),(ww3RU+p*nvars),c1,c2,c3,m3RU,m2RU,m1RU,p1RU,p2RU,weno,nvars); } } return(0); } /*! Computes the nonlinear weights for the 5th order component-wise WENO-type schemes using the ESWENO formulation of Yamaleev & Carpenter. Note that only the formulation for the nonlinear weights is adopted and implemented here, not the ESWENO scheme as a whole. \f{equation}{ \omega_k = \frac {a_k} {\sum_{j=1}^3 a_j },\ a_k = c_k \left( 1 + \frac{\tau_5}{\beta_k+\epsilon} \right)^p,\ k = 1,2,3, \f} where \f$c_k\f$ are the optimal weights, \f$p\f$ is hardcoded to \f$2\f$, and \f$\epsilon\f$ is an input parameter (#WENOParameters::eps) (typically \f$10^{-6}\f$). The smoothness indicators \f$\beta_k\f$ are given by: \f{eqnarray}{ \beta_1 &=& \frac{13}{12} \left(f_{j-2}-2f_{j-1}+f_j\right)^2 + \frac{1}{4}\left(f_{j-2}-4f_{j-1}+3f_j\right)^2 \\ \beta_2 &=& \frac{13}{12} \left(f_{j-1}-2f_j+f_{j+1}\right)^2 + \frac{1}{4}\left(f_{j-1}-f_{j+1}\right)^2 \\ \beta_3 &=& \frac{13}{12} \left(f_j-2f_{j+1}+f_{j+2}\right)^2 + \frac{1}{4}\left(3f_j-4f_{j+1}+f_{j+2}\right)^2, \f} and \f$\tau_5 = \left( f_{j-2}-4f_{j-1}+6f_j-4f_{j+1}+f_{j+2} \right)^2\f$. \b Notes: + This function computes the weights for the entire grid (for interpolation along a given spatial dimension \a dir ). Thus, it loops over all grid lines along \a dir. + The weights are computed for all components, for both the left- and right-biased interpolations, and for both the flux function \f${\bf f}\left({\bf u}\right)\f$ and the solution \f$\bf u\f$. Thus, this approach of computing and storing the weights is quite memory-intensive. + The variable \a offset denotes the offset in the complete array from where the weights for interpolation along the current interpolation dimension (\a dir ) are stored. \b Reference: + Yamaleev, Carpenter, A systematic methodology for constructing high-order energy stable WENO schemes, J. Comput. Phys., 2009. 
http://dx.doi.org/10.1016/j.jcp.2009.03.002 */ int WENOFifthOrderCalculateWeightsYC( double *fC, /*!< Array of cell-centered values of the function \f${\bf f}\left({\bf u}\right)\f$ */ double *uC, /*!< Array of cell-centered values of the solution \f${\bf u}\f$ */ double *x, /*!< Grid coordinates */ int dir, /*!< Spatial dimension along which to interpolation */ void *s, /*!< Object of type #HyPar containing solver-related variables */ void *m /*!< Object of type #MPIVariables containing MPI-related variables */ ) { HyPar *solver = (HyPar*) s; WENOParameters *weno = (WENOParameters*) solver->interp; MPIVariables *mpi = (MPIVariables*) m; int i; double *ww1LF, *ww2LF, *ww3LF, *ww1RF, *ww2RF, *ww3RF; double *ww1LU, *ww2LU, *ww3LU, *ww1RU, *ww2RU, *ww3RU; int ghosts = solver->ghosts; int ndims = solver->ndims; int nvars = solver->nvars; int *dim = solver->dim_local; int *stride= solver->stride_with_ghosts; /* define some constants */ static const double thirteen_by_twelve = 13.0/12.0; static const double one_fourth = 1.0/4.0; /* calculate dimension offset */ int offset = weno->offset[dir]; /* create index and bounds for the outer loop, i.e., to loop over all 1D lines along dimension "dir" */ int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims]; _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1; _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1; int N_outer; _ArrayProduct1D_(bounds_outer,ndims,N_outer); ww1LF = weno->w1 + offset; ww2LF = weno->w2 + offset; ww3LF = weno->w3 + offset; ww1RF = weno->w1 + 2*weno->size + offset; ww2RF = weno->w2 + 2*weno->size + offset; ww3RF = weno->w3 + 2*weno->size + offset; ww1LU = weno->w1 + weno->size + offset; ww2LU = weno->w2 + weno->size + offset; ww3LU = weno->w3 + weno->size + offset; ww1RU = weno->w1 + 2*weno->size + weno->size + offset; ww2RU = weno->w2 + 2*weno->size + weno->size + offset; ww3RU = weno->w3 + 2*weno->size + weno->size + offset; #pragma omp parallel for schedule(auto) default(shared) private(i,index_outer,indexC,indexI) for (i=0; i<N_outer; i++) { _ArrayIndexnD_(ndims,i,bounds_outer,index_outer,0); _ArrayCopy1D_(index_outer,indexC,ndims); _ArrayCopy1D_(index_outer,indexI,ndims); for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) { int qm1L,qm2L,qm3L,qp1L,qp2L,p,qm1R,qm2R,qm3R,qp1R,qp2R; _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p); indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1L); qm3L = qm1L - 2*stride[dir]; qm2L = qm1L - stride[dir]; qp1L = qm1L + stride[dir]; qp2L = qm1L + 2*stride[dir]; indexC[dir] = indexI[dir] ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1R); qm3R = qm1R + 2*stride[dir]; qm2R = qm1R + stride[dir]; qp1R = qm1R - stride[dir]; qp2R = qm1R - 2*stride[dir]; /* Defining stencil points */ double *m3LF, *m2LF, *m1LF, *p1LF, *p2LF; m3LF = (fC+qm3L*nvars); m2LF = (fC+qm2L*nvars); m1LF = (fC+qm1L*nvars); p1LF = (fC+qp1L*nvars); p2LF = (fC+qp2L*nvars); double *m3RF, *m2RF, *m1RF, *p1RF, *p2RF; m3RF = (fC+qm3R*nvars); m2RF = (fC+qm2R*nvars); m1RF = (fC+qm1R*nvars); p1RF = (fC+qp1R*nvars); p2RF = (fC+qp2R*nvars); double *m3LU, *m2LU, *m1LU, *p1LU, *p2LU; m3LU = (uC+qm3L*nvars); m2LU = (uC+qm2L*nvars); m1LU = (uC+qm1L*nvars); p1LU = (uC+qp1L*nvars); p2LU = (uC+qp2L*nvars); double *m3RU, *m2RU, *m1RU, *p1RU, *p2RU; m3RU = (uC+qm3R*nvars); m2RU = (uC+qm2R*nvars); m1RU = (uC+qm1R*nvars); p1RU = (uC+qp1R*nvars); p2RU = (uC+qp2R*nvars); /* optimal weights*/ double c1, c2, c3; if 
(!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_CRWENO_)) { if ( ((mpi->ip[dir] == 0 ) && (indexI[dir] == 0 )) || ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) { /* Use WENO5 at the physical boundaries */ c1 = _WENO_OPTIMAL_WEIGHT_1_; c2 = _WENO_OPTIMAL_WEIGHT_2_; c3 = _WENO_OPTIMAL_WEIGHT_3_; } else { /* CRWENO5 at the interior points */ c1 = _CRWENO_OPTIMAL_WEIGHT_1_; c2 = _CRWENO_OPTIMAL_WEIGHT_2_; c3 = _CRWENO_OPTIMAL_WEIGHT_3_; } } else { /* WENO5 and HCWENO5 */ c1 = _WENO_OPTIMAL_WEIGHT_1_; c2 = _WENO_OPTIMAL_WEIGHT_2_; c3 = _WENO_OPTIMAL_WEIGHT_3_; } /* calculate WENO weights */ _WENOWeights_v_YC_((ww1LF+p*nvars),(ww2LF+p*nvars),(ww3LF+p*nvars),c1,c2,c3,m3LF,m2LF,m1LF,p1LF,p2LF,weno,nvars); _WENOWeights_v_YC_((ww1RF+p*nvars),(ww2RF+p*nvars),(ww3RF+p*nvars),c1,c2,c3,m3RF,m2RF,m1RF,p1RF,p2RF,weno,nvars); _WENOWeights_v_YC_((ww1LU+p*nvars),(ww2LU+p*nvars),(ww3LU+p*nvars),c1,c2,c3,m3LU,m2LU,m1LU,p1LU,p2LU,weno,nvars); _WENOWeights_v_YC_((ww1RU+p*nvars),(ww2RU+p*nvars),(ww3RU+p*nvars),c1,c2,c3,m3RU,m2RU,m1RU,p1RU,p2RU,weno,nvars); } } return(0); } /*! Computes the nonlinear weights for the 5th order characteristic-based WENO-type schemes using the original formulation of Jiang & Shu: \f{equation}{ \omega_k = \frac {a_k} {\sum_{j=1}^3 a_j },\ a_k = \frac {c_k} {\left(\beta_k+\epsilon\right)^p},\ k = 1,2,3, \f} where \f$c_k\f$ are the optimal weights, \f$p\f$ is hardcoded to \f$2\f$, and \f$\epsilon\f$ is an input parameter (#WENOParameters::eps) (typically \f$10^{-6}\f$). The smoothness indicators \f$\beta_k\f$ are given by: \f{eqnarray}{ \beta_1 &=& \frac{13}{12} \left(\alpha_{j-2}-2\alpha_{j-1}+\alpha_j\right)^2 + \frac{1}{4}\left(\alpha_{j-2}-4\alpha_{j-1}+3\alpha_j\right)^2 \\ \beta_2 &=& \frac{13}{12} \left(\alpha_{j-1}-2\alpha_j+\alpha_{j+1}\right)^2 + \frac{1}{4}\left(\alpha_{j-1}-\alpha_{j+1}\right)^2 \\ \beta_3 &=& \frac{13}{12} \left(\alpha_j-2\alpha_{j+1}+\alpha_{j+2}\right)^2 + \frac{1}{4}\left(3\alpha_j-4\alpha_{j+1}+\alpha_{j+2}\right)^2 \f} where \f$\alpha\f$ is the characteristic flux or the solution. \b Notes: + This function computes the weights for the entire grid (for interpolation along a given spatial dimension \a dir ). Thus, it loops over all grid lines along \a dir. + The weights are computed for all components, for both the left- and right-biased interpolations, and for both the flux function \f${\bf f}\left({\bf u}\right)\f$ and the solution \f$\bf u\f$. Thus, this approach of computing and storing the weights is quite memory-intensive. + The variable \a offset denotes the offset in the complete array from where the weights for interpolation along the current interpolation dimension (\a dir ) are stored. + This function requires functions to compute the average state and the left eigenvectors for the characteristic decomposition. These are provided by the physical model through - #HyPar::GetLeftEigenvectors() - #HyPar::AveragingFunction() \b Reference: + Jiang, Shu, Efficient Implementation of Weighted ENO Schemes, J. Comput. Phys., 1996. 
http://dx.doi.org/10.1006/jcph.1996.0130 */ int WENOFifthOrderCalculateWeightsCharJS( double *fC, /*!< Array of cell-centered values of the function \f${\bf f}\left({\bf u}\right)\f$ */ double *uC, /*!< Array of cell-centered values of the solution \f${\bf u}\f$ */ double *x, /*!< Grid coordinates */ int dir, /*!< Spatial dimension along which to interpolation */ void *s, /*!< Object of type #HyPar containing solver-related variables */ void *m /*!< Object of type #MPIVariables containing MPI-related variables */ ) { HyPar *solver = (HyPar*) s; WENOParameters *weno = (WENOParameters*) solver->interp; MPIVariables *mpi = (MPIVariables*) m; int i; double *ww1LF, *ww2LF, *ww3LF, *ww1RF, *ww2RF, *ww3RF; double *ww1LU, *ww2LU, *ww3LU, *ww1RU, *ww2RU, *ww3RU; int ghosts = solver->ghosts; int ndims = solver->ndims; int nvars = solver->nvars; int *dim = solver->dim_local; int *stride= solver->stride_with_ghosts; /* define some constants */ static const double thirteen_by_twelve = 13.0/12.0; static const double one_fourth = 1.0/4.0; /* calculate dimension offset */ int offset = weno->offset[dir]; /* create index and bounds for the outer loop, i.e., to loop over all 1D lines along dimension "dir" */ int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims]; _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1; _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1; int N_outer; _ArrayProduct1D_(bounds_outer,ndims,N_outer); /* allocate arrays for the averaged state, eigenvectors and characteristic interpolated f */ double L[nvars*nvars], uavg[nvars]; ww1LF = weno->w1 + offset; ww2LF = weno->w2 + offset; ww3LF = weno->w3 + offset; ww1RF = weno->w1 + 2*weno->size + offset; ww2RF = weno->w2 + 2*weno->size + offset; ww3RF = weno->w3 + 2*weno->size + offset; ww1LU = weno->w1 + weno->size + offset; ww2LU = weno->w2 + weno->size + offset; ww3LU = weno->w3 + weno->size + offset; ww1RU = weno->w1 + 2*weno->size + weno->size + offset; ww2RU = weno->w2 + 2*weno->size + weno->size + offset; ww3RU = weno->w3 + 2*weno->size + weno->size + offset; #pragma omp parallel for schedule(auto) default(shared) private(i,index_outer,indexC,indexI) for (i=0; i<N_outer; i++) { _ArrayIndexnD_(ndims,i,bounds_outer,index_outer,0); _ArrayCopy1D_(index_outer,indexC,ndims); _ArrayCopy1D_(index_outer,indexI,ndims); for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) { int qm1L,qm2L,qm3L,qp1L,qp2L,p,qm1R,qm2R,qm3R,qp1R,qp2R; _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p); indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1L); qm3L = qm1L - 2*stride[dir]; qm2L = qm1L - stride[dir]; qp1L = qm1L + stride[dir]; qp2L = qm1L + 2*stride[dir]; indexC[dir] = indexI[dir] ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1R); qm3R = qm1R + 2*stride[dir]; qm2R = qm1R + stride[dir]; qp1R = qm1R - stride[dir]; qp2R = qm1R - 2*stride[dir]; /* find averaged state and left eigenvectors at this interface */ IERR solver->AveragingFunction(uavg,(uC+nvars*qm1L),(uC+nvars*qp1L),solver->physics); CHECKERR(ierr); IERR solver->GetLeftEigenvectors(uavg,L,solver->physics,dir); CHECKERR(ierr); /* Defining stencil points */ double m3LF[nvars], m2LF[nvars], m1LF[nvars], p1LF[nvars], p2LF[nvars]; double m3RF[nvars], m2RF[nvars], m1RF[nvars], p1RF[nvars], p2RF[nvars]; double m3LU[nvars], m2LU[nvars], m1LU[nvars], p1LU[nvars], p2LU[nvars]; double m3RU[nvars], m2RU[nvars], m1RU[nvars], p1RU[nvars], p2RU[nvars]; MatVecMult(nvars,m3LF,L,(fC+nvars*qm3L)); MatVecMult(nvars,m2LF,L,(fC+nvars*qm2L)); 
MatVecMult(nvars,m1LF,L,(fC+nvars*qm1L)); MatVecMult(nvars,p1LF,L,(fC+nvars*qp1L)); MatVecMult(nvars,p2LF,L,(fC+nvars*qp2L)); MatVecMult(nvars,m3RF,L,(fC+nvars*qm3R)); MatVecMult(nvars,m2RF,L,(fC+nvars*qm2R)); MatVecMult(nvars,m1RF,L,(fC+nvars*qm1R)); MatVecMult(nvars,p1RF,L,(fC+nvars*qp1R)); MatVecMult(nvars,p2RF,L,(fC+nvars*qp2R)); MatVecMult(nvars,m3LU,L,(uC+nvars*qm3L)); MatVecMult(nvars,m2LU,L,(uC+nvars*qm2L)); MatVecMult(nvars,m1LU,L,(uC+nvars*qm1L)); MatVecMult(nvars,p1LU,L,(uC+nvars*qp1L)); MatVecMult(nvars,p2LU,L,(uC+nvars*qp2L)); MatVecMult(nvars,m3RU,L,(uC+nvars*qm3R)); MatVecMult(nvars,m2RU,L,(uC+nvars*qm2R)); MatVecMult(nvars,m1RU,L,(uC+nvars*qm1R)); MatVecMult(nvars,p1RU,L,(uC+nvars*qp1R)); MatVecMult(nvars,p2RU,L,(uC+nvars*qp2R)); /* optimal weights*/ double c1, c2, c3; if (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_CRWENO_)) { if ( ((mpi->ip[dir] == 0 ) && (indexI[dir] == 0 )) || ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) { /* Use WENO5 at the physical boundaries */ c1 = _WENO_OPTIMAL_WEIGHT_1_; c2 = _WENO_OPTIMAL_WEIGHT_2_; c3 = _WENO_OPTIMAL_WEIGHT_3_; } else { /* CRWENO5 at the interior points */ c1 = _CRWENO_OPTIMAL_WEIGHT_1_; c2 = _CRWENO_OPTIMAL_WEIGHT_2_; c3 = _CRWENO_OPTIMAL_WEIGHT_3_; } } else { /* WENO5 and HCWENO5 */ c1 = _WENO_OPTIMAL_WEIGHT_1_; c2 = _WENO_OPTIMAL_WEIGHT_2_; c3 = _WENO_OPTIMAL_WEIGHT_3_; } /* calculate WENO weights */ _WENOWeights_v_JS_((ww1LF+p*nvars),(ww2LF+p*nvars),(ww3LF+p*nvars),c1,c2,c3,m3LF,m2LF,m1LF,p1LF,p2LF,weno,nvars); _WENOWeights_v_JS_((ww1RF+p*nvars),(ww2RF+p*nvars),(ww3RF+p*nvars),c1,c2,c3,m3RF,m2RF,m1RF,p1RF,p2RF,weno,nvars); _WENOWeights_v_JS_((ww1LU+p*nvars),(ww2LU+p*nvars),(ww3LU+p*nvars),c1,c2,c3,m3LU,m2LU,m1LU,p1LU,p2LU,weno,nvars); _WENOWeights_v_JS_((ww1RU+p*nvars),(ww2RU+p*nvars),(ww3RU+p*nvars),c1,c2,c3,m3RU,m2RU,m1RU,p1RU,p2RU,weno,nvars); } } return(0); } /*! Computes the nonlinear weights for the 5th order characteristic-based WENO-type schemes using the mapped formulation of Henrick, Aslam & Powers: \f{eqnarray}{ \omega_k &=& \frac {a_k} {\sum_{j=1}^3 a_j },\ a_k = \frac {\tilde{\omega}_k \left( c_k + c_k^2 - 3c_k\tilde{\omega}_k + \tilde{\omega}_k^2\right)} {c_k^2 + \tilde{\omega}_k\left(1-2c_k\right)}, \\ \tilde{\omega}_k &=& \frac {\tilde{a}_k} {\sum_{j=1}^3 \tilde{a}_j },\ \tilde{a}_k = \frac {c_k} {\left(\beta_k+\epsilon\right)^p},\ k = 1,2,3, \f} where \f$c_k\f$ are the optimal weights, \f$p\f$ is hardcoded to \f$2\f$, and \f$\epsilon\f$ is an input parameter (#WENOParameters::eps) (typically \f$10^{-6}\f$). The smoothness indicators \f$\beta_k\f$ are given by: \f{eqnarray}{ \beta_1 &=& \frac{13}{12} \left(\alpha_{j-2}-2\alpha_{j-1}+\alpha_j\right)^2 + \frac{1}{4}\left(\alpha_{j-2}-4\alpha_{j-1}+3\alpha_j\right)^2 \\ \beta_2 &=& \frac{13}{12} \left(\alpha_{j-1}-2\alpha_j+\alpha_{j+1}\right)^2 + \frac{1}{4}\left(\alpha_{j-1}-\alpha_{j+1}\right)^2 \\ \beta_3 &=& \frac{13}{12} \left(\alpha_j-2\alpha_{j+1}+\alpha_{j+2}\right)^2 + \frac{1}{4}\left(3\alpha_j-4\alpha_{j+1}+\alpha_{j+2}\right)^2 \f} where \f$\alpha\f$ is the characteristic flux or the solution. \b Notes: + This function computes the weights for the entire grid (for interpolation along a given spatial dimension \a dir ). Thus, it loops over all grid lines along \a dir. + The weights are computed for all components, for both the left- and right-biased interpolations, and for both the flux function \f${\bf f}\left({\bf u}\right)\f$ and the solution \f$\bf u\f$. 
Thus, this approach of computing and storing the weights is quite memory-intensive. + The variable \a offset denotes the offset in the complete array from where the weights for interpolation along the current interpolation dimension (\a dir ) are stored. + This function requires functions to compute the average state and the left eigenvectors for the characteristic decomposition. These are provided by the physical model through - #HyPar::GetLeftEigenvectors() - #HyPar::AveragingFunction() \b Reference: + Henrick, Aslam, Powers, Mapped weighted essentially non-oscillatory schemes: Achieving optimal order near critical points, J. Comput. Phys., 2005. http://dx.doi.org/10.1016/j.jcp.2005.01.023 */ int WENOFifthOrderCalculateWeightsCharM( double *fC, /*!< Array of cell-centered values of the function \f${\bf f}\left({\bf u}\right)\f$ */ double *uC, /*!< Array of cell-centered values of the solution \f${\bf u}\f$ */ double *x, /*!< Grid coordinates */ int dir, /*!< Spatial dimension along which to interpolation */ void *s, /*!< Object of type #HyPar containing solver-related variables */ void *m /*!< Object of type #MPIVariables containing MPI-related variables */ ) { HyPar *solver = (HyPar*) s; WENOParameters *weno = (WENOParameters*) solver->interp; MPIVariables *mpi = (MPIVariables*) m; int i; double *ww1LF, *ww2LF, *ww3LF, *ww1RF, *ww2RF, *ww3RF; double *ww1LU, *ww2LU, *ww3LU, *ww1RU, *ww2RU, *ww3RU; int ghosts = solver->ghosts; int ndims = solver->ndims; int nvars = solver->nvars; int *dim = solver->dim_local; int *stride= solver->stride_with_ghosts; /* define some constants */ static const double thirteen_by_twelve = 13.0/12.0; static const double one_fourth = 1.0/4.0; /* calculate dimension offset */ int offset = weno->offset[dir]; /* create index and bounds for the outer loop, i.e., to loop over all 1D lines along dimension "dir" */ int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims]; _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1; _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1; int N_outer; _ArrayProduct1D_(bounds_outer,ndims,N_outer); /* allocate arrays for the averaged state, eigenvectors and characteristic interpolated f */ double L[nvars*nvars], uavg[nvars]; ww1LF = weno->w1 + offset; ww2LF = weno->w2 + offset; ww3LF = weno->w3 + offset; ww1RF = weno->w1 + 2*weno->size + offset; ww2RF = weno->w2 + 2*weno->size + offset; ww3RF = weno->w3 + 2*weno->size + offset; ww1LU = weno->w1 + weno->size + offset; ww2LU = weno->w2 + weno->size + offset; ww3LU = weno->w3 + weno->size + offset; ww1RU = weno->w1 + 2*weno->size + weno->size + offset; ww2RU = weno->w2 + 2*weno->size + weno->size + offset; ww3RU = weno->w3 + 2*weno->size + weno->size + offset; #pragma omp parallel for schedule(auto) default(shared) private(i,index_outer,indexC,indexI) for (i=0; i<N_outer; i++) { _ArrayIndexnD_(ndims,i,bounds_outer,index_outer,0); _ArrayCopy1D_(index_outer,indexC,ndims); _ArrayCopy1D_(index_outer,indexI,ndims); for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) { int qm1L,qm2L,qm3L,qp1L,qp2L,p,qm1R,qm2R,qm3R,qp1R,qp2R; _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p); indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1L); qm3L = qm1L - 2*stride[dir]; qm2L = qm1L - stride[dir]; qp1L = qm1L + stride[dir]; qp2L = qm1L + 2*stride[dir]; indexC[dir] = indexI[dir] ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1R); qm3R = qm1R + 2*stride[dir]; qm2R = qm1R + stride[dir]; qp1R = qm1R - stride[dir]; qp2R = qm1R - 
2*stride[dir]; /* find averaged state and left eigenvectors at this interface */ IERR solver->AveragingFunction(uavg,(uC+nvars*qm1L),(uC+nvars*qp1L),solver->physics); CHECKERR(ierr); IERR solver->GetLeftEigenvectors(uavg,L,solver->physics,dir); CHECKERR(ierr); /* Defining stencil points */ double m3LF[nvars], m2LF[nvars], m1LF[nvars], p1LF[nvars], p2LF[nvars]; double m3RF[nvars], m2RF[nvars], m1RF[nvars], p1RF[nvars], p2RF[nvars]; double m3LU[nvars], m2LU[nvars], m1LU[nvars], p1LU[nvars], p2LU[nvars]; double m3RU[nvars], m2RU[nvars], m1RU[nvars], p1RU[nvars], p2RU[nvars]; MatVecMult(nvars,m3LF,L,(fC+nvars*qm3L)); MatVecMult(nvars,m2LF,L,(fC+nvars*qm2L)); MatVecMult(nvars,m1LF,L,(fC+nvars*qm1L)); MatVecMult(nvars,p1LF,L,(fC+nvars*qp1L)); MatVecMult(nvars,p2LF,L,(fC+nvars*qp2L)); MatVecMult(nvars,m3RF,L,(fC+nvars*qm3R)); MatVecMult(nvars,m2RF,L,(fC+nvars*qm2R)); MatVecMult(nvars,m1RF,L,(fC+nvars*qm1R)); MatVecMult(nvars,p1RF,L,(fC+nvars*qp1R)); MatVecMult(nvars,p2RF,L,(fC+nvars*qp2R)); MatVecMult(nvars,m3LU,L,(uC+nvars*qm3L)); MatVecMult(nvars,m2LU,L,(uC+nvars*qm2L)); MatVecMult(nvars,m1LU,L,(uC+nvars*qm1L)); MatVecMult(nvars,p1LU,L,(uC+nvars*qp1L)); MatVecMult(nvars,p2LU,L,(uC+nvars*qp2L)); MatVecMult(nvars,m3RU,L,(uC+nvars*qm3R)); MatVecMult(nvars,m2RU,L,(uC+nvars*qm2R)); MatVecMult(nvars,m1RU,L,(uC+nvars*qm1R)); MatVecMult(nvars,p1RU,L,(uC+nvars*qp1R)); MatVecMult(nvars,p2RU,L,(uC+nvars*qp2R)); /* optimal weights*/ double c1, c2, c3; if (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_CRWENO_)) { if ( ((mpi->ip[dir] == 0 ) && (indexI[dir] == 0 )) || ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) { /* Use WENO5 at the physical boundaries */ c1 = _WENO_OPTIMAL_WEIGHT_1_; c2 = _WENO_OPTIMAL_WEIGHT_2_; c3 = _WENO_OPTIMAL_WEIGHT_3_; } else { /* CRWENO5 at the interior points */ c1 = _CRWENO_OPTIMAL_WEIGHT_1_; c2 = _CRWENO_OPTIMAL_WEIGHT_2_; c3 = _CRWENO_OPTIMAL_WEIGHT_3_; } } else { /* WENO5 and HCWENO5 */ c1 = _WENO_OPTIMAL_WEIGHT_1_; c2 = _WENO_OPTIMAL_WEIGHT_2_; c3 = _WENO_OPTIMAL_WEIGHT_3_; } /* calculate WENO weights */ _WENOWeights_v_M_((ww1LF+p*nvars),(ww2LF+p*nvars),(ww3LF+p*nvars),c1,c2,c3,m3LF,m2LF,m1LF,p1LF,p2LF,weno,nvars); _WENOWeights_v_M_((ww1RF+p*nvars),(ww2RF+p*nvars),(ww3RF+p*nvars),c1,c2,c3,m3RF,m2RF,m1RF,p1RF,p2RF,weno,nvars); _WENOWeights_v_M_((ww1LU+p*nvars),(ww2LU+p*nvars),(ww3LU+p*nvars),c1,c2,c3,m3LU,m2LU,m1LU,p1LU,p2LU,weno,nvars); _WENOWeights_v_M_((ww1RU+p*nvars),(ww2RU+p*nvars),(ww3RU+p*nvars),c1,c2,c3,m3RU,m2RU,m1RU,p1RU,p2RU,weno,nvars); } } return(0); } /*! Computes the nonlinear weights for the 5th order characteristic-based WENO-type schemes using the WENO-Z formulation: \f{equation}{ \omega_k = \frac {a_k} {\sum_{j=1}^3 a_j },\ a_k = c_k \left( 1 + \frac{\tau_5}{\beta_k+\epsilon} \right)^p,\ k = 1,2,3, \f} where \f$c_k\f$ are the optimal weights, \f$p\f$ is hardcoded to \f$2\f$, and \f$\epsilon\f$ is an input parameter (#WENOParameters::eps) (typically \f$10^{-6}\f$). 
The smoothness indicators \f$\beta_k\f$ are given by: \f{eqnarray}{ \beta_1 &=& \frac{13}{12} \left(\alpha_{j-2}-2\alpha_{j-1}+\alpha_j\right)^2 + \frac{1}{4}\left(\alpha_{j-2}-4\alpha_{j-1}+3\alpha_j\right)^2 \\ \beta_2 &=& \frac{13}{12} \left(\alpha_{j-1}-2\alpha_j+\alpha_{j+1}\right)^2 + \frac{1}{4}\left(\alpha_{j-1}-\alpha_{j+1}\right)^2 \\ \beta_3 &=& \frac{13}{12} \left(\alpha_j-2\alpha_{j+1}+\alpha_{j+2}\right)^2 + \frac{1}{4}\left(3\alpha_j-4\alpha_{j+1}+\alpha_{j+2}\right)^2 \f} and \f$\tau_5 = \left|\beta_1 - \beta_3 \right|\f$, and \f$\alpha\f$ is the characteristic flux or the solution. \b Notes: + This function computes the weights for the entire grid (for interpolation along a given spatial dimension \a dir ). Thus, it loops over all grid lines along \a dir. + The weights are computed for all components, for both the left- and right-biased interpolations, and for both the flux function \f${\bf f}\left({\bf u}\right)\f$ and the solution \f$\bf u\f$. Thus, this approach of computing and storing the weights is quite memory-intensive. + The variable \a offset denotes the offset in the complete array from where the weights for interpolation along the current interpolation dimension (\a dir ) are stored. + This function requires functions to compute the average state and the left eigenvectors for the characteristic decomposition. These are provided by the physical model through - #HyPar::GetLeftEigenvectors() - #HyPar::AveragingFunction() \b Reference: + Borges, et. al., An improved weighted essentially non-oscillatory scheme for hyperbolic conservation laws, J. Comput. Phys., 2008. http://dx.doi.org/10.1016/j.jcp.2007.11.038 + Castro, M., Costa, B., Don, W. S., High order weighted essentially non-oscillatory WENO-Z schemes for hyperbolic conservation laws, J. Comput. Phys., 2011. 
http://dx.doi.org/10.1016/j.jcp.2010.11.028 */ int WENOFifthOrderCalculateWeightsCharZ( double *fC, /*!< Array of cell-centered values of the function \f${\bf f}\left({\bf u}\right)\f$ */ double *uC, /*!< Array of cell-centered values of the solution \f${\bf u}\f$ */ double *x, /*!< Grid coordinates */ int dir, /*!< Spatial dimension along which to interpolation */ void *s, /*!< Object of type #HyPar containing solver-related variables */ void *m /*!< Object of type #MPIVariables containing MPI-related variables */ ) { HyPar *solver = (HyPar*) s; WENOParameters *weno = (WENOParameters*) solver->interp; MPIVariables *mpi = (MPIVariables*) m; int i; double *ww1LF, *ww2LF, *ww3LF, *ww1RF, *ww2RF, *ww3RF; double *ww1LU, *ww2LU, *ww3LU, *ww1RU, *ww2RU, *ww3RU; int ghosts = solver->ghosts; int ndims = solver->ndims; int nvars = solver->nvars; int *dim = solver->dim_local; int *stride= solver->stride_with_ghosts; /* define some constants */ static const double thirteen_by_twelve = 13.0/12.0; static const double one_fourth = 1.0/4.0; /* calculate dimension offset */ int offset = weno->offset[dir]; /* create index and bounds for the outer loop, i.e., to loop over all 1D lines along dimension "dir" */ int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims]; _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1; _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1; int N_outer; _ArrayProduct1D_(bounds_outer,ndims,N_outer); /* allocate arrays for the averaged state, eigenvectors and characteristic interpolated f */ double L[nvars*nvars], uavg[nvars]; ww1LF = weno->w1 + offset; ww2LF = weno->w2 + offset; ww3LF = weno->w3 + offset; ww1RF = weno->w1 + 2*weno->size + offset; ww2RF = weno->w2 + 2*weno->size + offset; ww3RF = weno->w3 + 2*weno->size + offset; ww1LU = weno->w1 + weno->size + offset; ww2LU = weno->w2 + weno->size + offset; ww3LU = weno->w3 + weno->size + offset; ww1RU = weno->w1 + 2*weno->size + weno->size + offset; ww2RU = weno->w2 + 2*weno->size + weno->size + offset; ww3RU = weno->w3 + 2*weno->size + weno->size + offset; #pragma omp parallel for schedule(auto) default(shared) private(i,index_outer,indexC,indexI) for (i=0; i<N_outer; i++) { _ArrayIndexnD_(ndims,i,bounds_outer,index_outer,0); _ArrayCopy1D_(index_outer,indexC,ndims); _ArrayCopy1D_(index_outer,indexI,ndims); for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) { int qm1L,qm2L,qm3L,qp1L,qp2L,p,qm1R,qm2R,qm3R,qp1R,qp2R; _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p); indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1L); qm3L = qm1L - 2*stride[dir]; qm2L = qm1L - stride[dir]; qp1L = qm1L + stride[dir]; qp2L = qm1L + 2*stride[dir]; indexC[dir] = indexI[dir] ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1R); qm3R = qm1R + 2*stride[dir]; qm2R = qm1R + stride[dir]; qp1R = qm1R - stride[dir]; qp2R = qm1R - 2*stride[dir]; /* find averaged state and left eigenvectors at this interface */ IERR solver->AveragingFunction(uavg,(uC+nvars*qm1L),(uC+nvars*qp1L),solver->physics); CHECKERR(ierr); IERR solver->GetLeftEigenvectors(uavg,L,solver->physics,dir); CHECKERR(ierr); /* Defining stencil points */ double m3LF[nvars], m2LF[nvars], m1LF[nvars], p1LF[nvars], p2LF[nvars]; double m3RF[nvars], m2RF[nvars], m1RF[nvars], p1RF[nvars], p2RF[nvars]; double m3LU[nvars], m2LU[nvars], m1LU[nvars], p1LU[nvars], p2LU[nvars]; double m3RU[nvars], m2RU[nvars], m1RU[nvars], p1RU[nvars], p2RU[nvars]; MatVecMult(nvars,m3LF,L,(fC+nvars*qm3L)); 
MatVecMult(nvars,m2LF,L,(fC+nvars*qm2L)); MatVecMult(nvars,m1LF,L,(fC+nvars*qm1L)); MatVecMult(nvars,p1LF,L,(fC+nvars*qp1L)); MatVecMult(nvars,p2LF,L,(fC+nvars*qp2L)); MatVecMult(nvars,m3RF,L,(fC+nvars*qm3R)); MatVecMult(nvars,m2RF,L,(fC+nvars*qm2R)); MatVecMult(nvars,m1RF,L,(fC+nvars*qm1R)); MatVecMult(nvars,p1RF,L,(fC+nvars*qp1R)); MatVecMult(nvars,p2RF,L,(fC+nvars*qp2R)); MatVecMult(nvars,m3LU,L,(uC+nvars*qm3L)); MatVecMult(nvars,m2LU,L,(uC+nvars*qm2L)); MatVecMult(nvars,m1LU,L,(uC+nvars*qm1L)); MatVecMult(nvars,p1LU,L,(uC+nvars*qp1L)); MatVecMult(nvars,p2LU,L,(uC+nvars*qp2L)); MatVecMult(nvars,m3RU,L,(uC+nvars*qm3R)); MatVecMult(nvars,m2RU,L,(uC+nvars*qm2R)); MatVecMult(nvars,m1RU,L,(uC+nvars*qm1R)); MatVecMult(nvars,p1RU,L,(uC+nvars*qp1R)); MatVecMult(nvars,p2RU,L,(uC+nvars*qp2R)); /* optimal weights*/ double c1, c2, c3; if (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_CRWENO_)) { if ( ((mpi->ip[dir] == 0 ) && (indexI[dir] == 0 )) || ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) { /* Use WENO5 at the physical boundaries */ c1 = _WENO_OPTIMAL_WEIGHT_1_; c2 = _WENO_OPTIMAL_WEIGHT_2_; c3 = _WENO_OPTIMAL_WEIGHT_3_; } else { /* CRWENO5 at the interior points */ c1 = _CRWENO_OPTIMAL_WEIGHT_1_; c2 = _CRWENO_OPTIMAL_WEIGHT_2_; c3 = _CRWENO_OPTIMAL_WEIGHT_3_; } } else { /* WENO5 and HCWENO5 */ c1 = _WENO_OPTIMAL_WEIGHT_1_; c2 = _WENO_OPTIMAL_WEIGHT_2_; c3 = _WENO_OPTIMAL_WEIGHT_3_; } /* calculate WENO weights */ _WENOWeights_v_Z_((ww1LF+p*nvars),(ww2LF+p*nvars),(ww3LF+p*nvars),c1,c2,c3,m3LF,m2LF,m1LF,p1LF,p2LF,weno,nvars); _WENOWeights_v_Z_((ww1RF+p*nvars),(ww2RF+p*nvars),(ww3RF+p*nvars),c1,c2,c3,m3RF,m2RF,m1RF,p1RF,p2RF,weno,nvars); _WENOWeights_v_Z_((ww1LU+p*nvars),(ww2LU+p*nvars),(ww3LU+p*nvars),c1,c2,c3,m3LU,m2LU,m1LU,p1LU,p2LU,weno,nvars); _WENOWeights_v_Z_((ww1RU+p*nvars),(ww2RU+p*nvars),(ww3RU+p*nvars),c1,c2,c3,m3RU,m2RU,m1RU,p1RU,p2RU,weno,nvars); } } return(0); } /*! Computes the nonlinear weights for the 5th order characteristic-based WENO-type schemes using the ESWENO formulation of Yamaleev & Carpenter. Note that only the formulation for the nonlinear weights is adopted and implemented here, not the ESWENO scheme as a whole. \f{equation}{ \omega_k = \frac {a_k} {\sum_{j=1}^3 a_j },\ a_k = c_k \left( 1 + \frac{\tau_5}{\beta_k+\epsilon} \right)^p,\ k = 1,2,3, \f} where \f$c_k\f$ are the optimal weights, \f$p\f$ is hardcoded to \f$2\f$, and \f$\epsilon\f$ is an input parameter (#WENOParameters::eps) (typically \f$10^{-6}\f$). The smoothness indicators \f$\beta_k\f$ are given by: \f{eqnarray}{ \beta_1 &=& \frac{13}{12} \left(\alpha_{j-2}-2\alpha_{j-1}+\alpha_j\right)^2 + \frac{1}{4}\left(\alpha_{j-2}-4\alpha_{j-1}+3\alpha_j\right)^2 \\ \beta_2 &=& \frac{13}{12} \left(\alpha_{j-1}-2\alpha_j+\alpha_{j+1}\right)^2 + \frac{1}{4}\left(\alpha_{j-1}-\alpha_{j+1}\right)^2 \\ \beta_3 &=& \frac{13}{12} \left(\alpha_j-2\alpha_{j+1}+\alpha_{j+2}\right)^2 + \frac{1}{4}\left(3\alpha_j-4\alpha_{j+1}+\alpha_{j+2}\right)^2 \f} and \f$\tau_5 = \left( \alpha_{j-2}-4\alpha_{j-1}+6\alpha_j-4\alpha_{j+1}+\alpha_{j+2} \right)^2\f$ and \f$\alpha\f$ is the characteristic flux or the solution. \b Notes: + This function computes the weights for the entire grid (for interpolation along a given spatial dimension \a dir ). Thus, it loops over all grid lines along \a dir. + The weights are computed for all components, for both the left- and right-biased interpolations, and for both the flux function \f${\bf f}\left({\bf u}\right)\f$ and the solution \f$\bf u\f$. 
Thus, this approach of computing and storing the weights is quite memory-intensive. + The variable \a offset denotes the offset in the complete array from where the weights for interpolation along the current interpolation dimension (\a dir ) are stored. + This function requires functions to compute the average state and the left eigenvectors for the characteristic decomposition. These are provided by the physical model through - #HyPar::GetLeftEigenvectors() - #HyPar::AveragingFunction() \b Reference: + Yamaleev, Carpenter, A systematic methodology for constructing high-order energy stable WENO schemes, J. Comput. Phys., 2009. http://dx.doi.org/10.1016/j.jcp.2009.03.002 */ int WENOFifthOrderCalculateWeightsCharYC( double *fC, /*!< Array of cell-centered values of the function \f${\bf f}\left({\bf u}\right)\f$ */ double *uC, /*!< Array of cell-centered values of the solution \f${\bf u}\f$ */ double *x, /*!< Grid coordinates */ int dir, /*!< Spatial dimension along which to interpolation */ void *s, /*!< Object of type #HyPar containing solver-related variables */ void *m /*!< Object of type #MPIVariables containing MPI-related variables */ ) { HyPar *solver = (HyPar*) s; WENOParameters *weno = (WENOParameters*) solver->interp; MPIVariables *mpi = (MPIVariables*) m; int i; double *ww1LF, *ww2LF, *ww3LF, *ww1RF, *ww2RF, *ww3RF; double *ww1LU, *ww2LU, *ww3LU, *ww1RU, *ww2RU, *ww3RU; int ghosts = solver->ghosts; int ndims = solver->ndims; int nvars = solver->nvars; int *dim = solver->dim_local; int *stride= solver->stride_with_ghosts; /* define some constants */ static const double thirteen_by_twelve = 13.0/12.0; static const double one_fourth = 1.0/4.0; /* calculate dimension offset */ int offset = weno->offset[dir]; /* create index and bounds for the outer loop, i.e., to loop over all 1D lines along dimension "dir" */ int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims]; _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1; _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1; int N_outer; _ArrayProduct1D_(bounds_outer,ndims,N_outer); /* allocate arrays for the averaged state, eigenvectors and characteristic interpolated f */ double L[nvars*nvars], uavg[nvars]; ww1LF = weno->w1 + offset; ww2LF = weno->w2 + offset; ww3LF = weno->w3 + offset; ww1RF = weno->w1 + 2*weno->size + offset; ww2RF = weno->w2 + 2*weno->size + offset; ww3RF = weno->w3 + 2*weno->size + offset; ww1LU = weno->w1 + weno->size + offset; ww2LU = weno->w2 + weno->size + offset; ww3LU = weno->w3 + weno->size + offset; ww1RU = weno->w1 + 2*weno->size + weno->size + offset; ww2RU = weno->w2 + 2*weno->size + weno->size + offset; ww3RU = weno->w3 + 2*weno->size + weno->size + offset; #pragma omp parallel for schedule(auto) default(shared) private(i,index_outer,indexC,indexI) for (i=0; i<N_outer; i++) { _ArrayIndexnD_(ndims,i,bounds_outer,index_outer,0); _ArrayCopy1D_(index_outer,indexC,ndims); _ArrayCopy1D_(index_outer,indexI,ndims); for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) { int qm1L,qm2L,qm3L,qp1L,qp2L,p,qm1R,qm2R,qm3R,qp1R,qp2R; _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p); indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1L); qm3L = qm1L - 2*stride[dir]; qm2L = qm1L - stride[dir]; qp1L = qm1L + stride[dir]; qp2L = qm1L + 2*stride[dir]; indexC[dir] = indexI[dir] ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1R); qm3R = qm1R + 2*stride[dir]; qm2R = qm1R + stride[dir]; qp1R = qm1R - stride[dir]; qp2R = qm1R - 2*stride[dir]; /* find 
averaged state and left eigenvectors at this interface */ IERR solver->AveragingFunction(uavg,(uC+nvars*qm1L),(uC+nvars*qp1L),solver->physics); CHECKERR(ierr); IERR solver->GetLeftEigenvectors(uavg,L,solver->physics,dir); CHECKERR(ierr); /* Defining stencil points */ double m3LF[nvars], m2LF[nvars], m1LF[nvars], p1LF[nvars], p2LF[nvars]; double m3RF[nvars], m2RF[nvars], m1RF[nvars], p1RF[nvars], p2RF[nvars]; double m3LU[nvars], m2LU[nvars], m1LU[nvars], p1LU[nvars], p2LU[nvars]; double m3RU[nvars], m2RU[nvars], m1RU[nvars], p1RU[nvars], p2RU[nvars]; MatVecMult(nvars,m3LF,L,(fC+nvars*qm3L)); MatVecMult(nvars,m2LF,L,(fC+nvars*qm2L)); MatVecMult(nvars,m1LF,L,(fC+nvars*qm1L)); MatVecMult(nvars,p1LF,L,(fC+nvars*qp1L)); MatVecMult(nvars,p2LF,L,(fC+nvars*qp2L)); MatVecMult(nvars,m3RF,L,(fC+nvars*qm3R)); MatVecMult(nvars,m2RF,L,(fC+nvars*qm2R)); MatVecMult(nvars,m1RF,L,(fC+nvars*qm1R)); MatVecMult(nvars,p1RF,L,(fC+nvars*qp1R)); MatVecMult(nvars,p2RF,L,(fC+nvars*qp2R)); MatVecMult(nvars,m3LU,L,(uC+nvars*qm3L)); MatVecMult(nvars,m2LU,L,(uC+nvars*qm2L)); MatVecMult(nvars,m1LU,L,(uC+nvars*qm1L)); MatVecMult(nvars,p1LU,L,(uC+nvars*qp1L)); MatVecMult(nvars,p2LU,L,(uC+nvars*qp2L)); MatVecMult(nvars,m3RU,L,(uC+nvars*qm3R)); MatVecMult(nvars,m2RU,L,(uC+nvars*qm2R)); MatVecMult(nvars,m1RU,L,(uC+nvars*qm1R)); MatVecMult(nvars,p1RU,L,(uC+nvars*qp1R)); MatVecMult(nvars,p2RU,L,(uC+nvars*qp2R)); /* optimal weights*/ double c1, c2, c3; if (!strcmp(solver->spatial_scheme_hyp,_FIFTH_ORDER_CRWENO_)) { if ( ((mpi->ip[dir] == 0 ) && (indexI[dir] == 0 )) || ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) { /* Use WENO5 at the physical boundaries */ c1 = _WENO_OPTIMAL_WEIGHT_1_; c2 = _WENO_OPTIMAL_WEIGHT_2_; c3 = _WENO_OPTIMAL_WEIGHT_3_; } else { /* CRWENO5 at the interior points */ c1 = _CRWENO_OPTIMAL_WEIGHT_1_; c2 = _CRWENO_OPTIMAL_WEIGHT_2_; c3 = _CRWENO_OPTIMAL_WEIGHT_3_; } } else { /* WENO5 and HCWENO5 */ c1 = _WENO_OPTIMAL_WEIGHT_1_; c2 = _WENO_OPTIMAL_WEIGHT_2_; c3 = _WENO_OPTIMAL_WEIGHT_3_; } /* calculate WENO weights */ _WENOWeights_v_YC_((ww1LF+p*nvars),(ww2LF+p*nvars),(ww3LF+p*nvars),c1,c2,c3,m3LF,m2LF,m1LF,p1LF,p2LF,weno,nvars); _WENOWeights_v_YC_((ww1RF+p*nvars),(ww2RF+p*nvars),(ww3RF+p*nvars),c1,c2,c3,m3RF,m2RF,m1RF,p1RF,p2RF,weno,nvars); _WENOWeights_v_YC_((ww1LU+p*nvars),(ww2LU+p*nvars),(ww3LU+p*nvars),c1,c2,c3,m3LU,m2LU,m1LU,p1LU,p2LU,weno,nvars); _WENOWeights_v_YC_((ww1RU+p*nvars),(ww2RU+p*nvars),(ww3RU+p*nvars),c1,c2,c3,m3RU,m2RU,m1RU,p1RU,p2RU,weno,nvars); } } return(0); }
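/*
 * Minimal scalar sketch (illustration only, not part of HyPar): the Jiang-Shu
 * nonlinear weights for a single 5-point stencil, following the formulas in
 * the doc comments above.  The optimal weights and epsilon below are assumed
 * example values, not the library's _WENO_OPTIMAL_WEIGHT_* macros.
 */
#include <math.h>

static void weno5_js_weights_sketch(const double f[5], /* f[j-2] ... f[j+2] */
                                    double w[3])       /* nonlinear weights */
{
    const double c[3] = {0.1, 0.6, 0.3}; /* assumed optimal weights c_k */
    const double eps  = 1e-6;            /* cf. WENOParameters::eps     */
    double beta[3], a[3], sum = 0.0;
    int k;

    /* smoothness indicators beta_k */
    beta[0] = (13.0/12.0)*pow(f[0]-2*f[1]+f[2], 2) + 0.25*pow(f[0]-4*f[1]+3*f[2], 2);
    beta[1] = (13.0/12.0)*pow(f[1]-2*f[2]+f[3], 2) + 0.25*pow(f[1]-f[3], 2);
    beta[2] = (13.0/12.0)*pow(f[2]-2*f[3]+f[4], 2) + 0.25*pow(3*f[2]-4*f[3]+f[4], 2);

    /* a_k = c_k / (beta_k + eps)^2, omega_k = a_k / sum_j a_j */
    for (k = 0; k < 3; k++) { a[k] = c[k] / pow(beta[k] + eps, 2); sum += a[k]; }
    for (k = 0; k < 3; k++) { w[k] = a[k] / sum; }
}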
GB_unaryop__minv_fp32_uint64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_fp32_uint64 // op(A') function: GB_tran__minv_fp32_uint64 // C type: float // A type: uint64_t // cast: float cij = (float) aij // unaryop: cij = (1.0F)/aij #define GB_ATYPE \ uint64_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = (1.0F)/x ; // casting #define GB_CASTING(z, x) \ float z = (float) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_FP32 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_fp32_uint64 ( float *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_fp32_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
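/* Minimal, self-contained equivalent of the forward (non-transposed) kernel
   above, with the GB_* macros expanded by hand. Illustrative only; it bypasses
   the GraphBLAS macro machinery and the GB_DISABLE check. */
#include <stdint.h>

static void minv_fp32_uint64_apply_sketch(float *Cx, const uint64_t *Ax,
                                           int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        float x = (float) Ax [p] ;   /* GB_CASTING: cast uint64_t to float */
        Cx [p] = (1.0F)/x ;          /* GB_OP: multiplicative inverse */
    }
}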
first_private.c
#include <stdio.h>
#include <omp.h>

int main() {
	int i = 10;

	/* private(i): each thread gets its own uninitialized copy, so the printed
	   value is indeterminate (garbage). */
	#pragma omp parallel private(i)
	{
		printf("%d\n", i);
	}

	printf("\n\n");

	/* firstprivate(i): each thread's copy is initialized to 10 before the loop
	   body overwrites it. Note that "#pragma omp parallel for" must be followed
	   directly by the for statement, not by a compound block. */
	#pragma omp parallel for firstprivate(i)
	for (int x = 0; x < 5; x++) {
		i = x;
		printf("%d\n", i);
	}

	printf("\n\n");

	#pragma omp parallel
	{
		printf("%d\n", i); // The original i in main() is unchanged (still 10)
	}

	return 0;
}
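/* Companion sketch (separate from the file above): lastprivate copies the value
   from the sequentially last iteration back into the enclosing variable, which
   private/firstprivate alone cannot do. The function name is illustrative. */
#include <stdio.h>

static void lastprivate_demo(void) {
	int i = 10;
	#pragma omp parallel for lastprivate(i)
	for (int x = 0; x < 5; x++) {
		i = x;
	}
	printf("i after the loop = %d\n", i); /* prints 4, the value from the last iteration */
}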
GB_subassign_zombie.c
//------------------------------------------------------------------------------ // GB_subassign_zombie: C(I,J)<!,repl> = empty ; using S //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Method 00: C(I,J)<!,repl> = empty ; using S // M: NULL // Mask_comp: true // C_replace: true // accum: any (present or not; result is the same) // A: any (scalar or matrix; result is the same) // S: constructed // C: not bitmap #include "GB_subassign_methods.h" #undef GB_FREE_ALL #define GB_FREE_ALL GB_phbix_free (S) ; GrB_Info GB_subassign_zombie ( GrB_Matrix C, // input: const GrB_Index *I, const int64_t ni, const int64_t nI, const int Ikind, const int64_t Icolon [3], const GrB_Index *J, const int64_t nj, const int64_t nJ, const int Jkind, const int64_t Jcolon [3], GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_IS_FULL (C)) ; //-------------------------------------------------------------------------- // S = C(I,J) //-------------------------------------------------------------------------- GrB_Info info ; struct GB_Matrix_opaque S_header ; GrB_Matrix S = GB_clear_static_header (&S_header) ; GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, false, Context)) ; ASSERT (GB_JUMBLED_OK (S)) ; // S can be returned as jumbled //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- const int64_t *restrict Sx = (int64_t *) S->x ; int64_t *restrict Ci = C->i ; //-------------------------------------------------------------------------- // Method 00: C(I,J)<!,repl> = empty ; using S //-------------------------------------------------------------------------- // Time: Optimal, O(nnz(S)), assuming S has already been constructed. //-------------------------------------------------------------------------- // Parallel: all entries in S can be processed entirely in parallel. //-------------------------------------------------------------------------- // All entries in C(I,J) are deleted. The result does not depend on A or // the scalar. int64_t snz = GB_NNZ (S) ; GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (snz, chunk, nthreads_max) ; int64_t nzombies = C->nzombies ; int64_t pS ; #pragma omp parallel for num_threads(nthreads) schedule(static) \ reduction(+:nzombies) for (pS = 0 ; pS < snz ; pS++) { // S (inew,jnew) is a pointer back into C (I(inew), J(jnew)) int64_t pC = Sx [pS] ; int64_t i = Ci [pC] ; // ----[X A 0] or [X . 0]----------------------------------------------- // action: ( X ): still a zombie // ----[C A 0] or [C . 0]----------------------------------------------- // action: C_repl: ( delete ): becomes a zombie if (!GB_IS_ZOMBIE (i)) { nzombies++ ; Ci [pC] = GB_FLIP (i) ; } } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- C->nzombies = nzombies ; GB_FREE_ALL ; return (GrB_SUCCESS) ; }
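/* Simplified standalone version of the parallel zombie-marking pass above.
   The flip encoding shown here is hypothetical (the real GB_FLIP / GB_IS_ZOMBIE
   macros are defined in the GraphBLAS headers); the point is the pattern: flip
   each referenced row index to a reserved negative encoding and count the
   deletions with an OpenMP reduction. */
#include <stdint.h>

#define FLIP_SKETCH(i)      (-(i) - 2)   /* assumed encoding: always negative */
#define IS_ZOMBIE_SKETCH(i) ((i) < 0)

static int64_t delete_all_sketch(int64_t *Ci, const int64_t *Sx, int64_t snz,
                                 int64_t nzombies, int nthreads)
{
    int64_t pS ;
    #pragma omp parallel for num_threads(nthreads) schedule(static) \
        reduction(+:nzombies)
    for (pS = 0 ; pS < snz ; pS++)
    {
        int64_t pC = Sx [pS] ;     /* S(inew,jnew) points back into C(I(inew),J(jnew)) */
        int64_t i  = Ci [pC] ;
        if (!IS_ZOMBIE_SKETCH (i))
        {
            nzombies++ ;
            Ci [pC] = FLIP_SKETCH (i) ;
        }
    }
    return (nzombies) ;
}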
blake2bp.c
/* BLAKE2 reference source code package - optimized C implementations Written in 2012 by Samuel Neves <sneves@dei.uc.pt> To the extent possible under law, the author(s) have dedicated all copyright and related and neighboring rights to this software to the public domain worldwide. This software is distributed without any warranty. You should have received a copy of the CC0 Public Domain Dedication along with this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdint.h> #if defined(_OPENMP) #include <omp.h> #endif #include "blake2.h" #include "blake2-impl.h" #define PARALLELISM_DEGREE 4 static int blake2bp_init_leaf( blake2b_state *S, uint8_t outlen, uint8_t keylen, uint64_t offset ) { blake2b_param P[1]; P->digest_length = outlen; P->key_length = keylen; P->fanout = PARALLELISM_DEGREE; P->depth = 2; store32(&P->leaf_length, 0); store64(&P->node_offset, offset); P->node_depth = 0; P->inner_length = BLAKE2B_OUTBYTES; memset( P->reserved, 0, sizeof( P->reserved ) ); memset( P->salt, 0, sizeof( P->salt ) ); memset( P->personal, 0, sizeof( P->personal ) ); blake2b_init_param( S, P ); S->outlen = P->inner_length; return 0; } static int blake2bp_init_root( blake2b_state *S, uint8_t outlen, uint8_t keylen ) { blake2b_param P[1]; P->digest_length = outlen; P->key_length = keylen; P->fanout = PARALLELISM_DEGREE; P->depth = 2; store32(&P->leaf_length, 0); store64(&P->node_offset, 0); P->node_depth = 1; P->inner_length = BLAKE2B_OUTBYTES; memset( P->reserved, 0, sizeof( P->reserved ) ); memset( P->salt, 0, sizeof( P->salt ) ); memset( P->personal, 0, sizeof( P->personal ) ); blake2b_init_param( S, P ); S->outlen = P->digest_length; return 0; } int blake2bp_init( blake2bp_state *S, size_t outlen ) { if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1; memset( S->buf, 0, sizeof( S->buf ) ); S->buflen = 0; if( blake2bp_init_root( S->R, ( uint8_t ) outlen, 0 ) < 0 ) return -1; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2bp_init_leaf( S->S[i], ( uint8_t ) outlen, 0, i ) < 0 ) return -1; S->R->last_node = 1; S->S[PARALLELISM_DEGREE - 1]->last_node = 1; S->outlen = ( uint8_t ) outlen; return 0; } int blake2bp_init_key( blake2bp_state *S, size_t outlen, const void *key, size_t keylen ) { if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1; if( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1; memset( S->buf, 0, sizeof( S->buf ) ); S->buflen = 0; if( blake2bp_init_root( S->R, ( uint8_t ) outlen, ( uint8_t ) keylen ) < 0 ) return -1; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2bp_init_leaf( S->S[i], ( uint8_t ) outlen, ( uint8_t ) keylen, i ) < 0 ) return -1; S->R->last_node = 1; S->S[PARALLELISM_DEGREE - 1]->last_node = 1; S->outlen = ( uint8_t ) outlen; { uint8_t block[BLAKE2B_BLOCKBYTES]; memset( block, 0, BLAKE2B_BLOCKBYTES ); memcpy( block, key, keylen ); for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2b_update( S->S[i], block, BLAKE2B_BLOCKBYTES ); secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */ } return 0; } int blake2bp_update( blake2bp_state *S, const uint8_t *in, size_t inlen ) { size_t left = S->buflen; size_t fill = sizeof( S->buf ) - left; if( left && inlen >= fill ) { memcpy( S->buf + left, in, fill ); for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, BLAKE2B_BLOCKBYTES ); in += fill; inlen -= fill; left = 0; } #if defined(_OPENMP) 
omp_set_num_threads(PARALLELISM_DEGREE); #pragma omp parallel shared(S) #else for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ ) #endif { #if defined(_OPENMP) size_t id__ = ( size_t ) omp_get_thread_num(); #endif size_t inlen__ = inlen; const uint8_t *in__ = ( const uint8_t * )in; in__ += id__ * BLAKE2B_BLOCKBYTES; while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES ) { blake2b_update( S->S[id__], in__, BLAKE2B_BLOCKBYTES ); in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES; inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES; } } in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES ); inlen %= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES; if( inlen > 0 ) memcpy( S->buf + left, in, inlen ); S->buflen = ( uint32_t ) left + ( uint32_t ) inlen; return 0; } int blake2bp_final( blake2bp_state *S, uint8_t *out, size_t outlen ) { uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES]; if(S->outlen != outlen) return -1; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) { if( S->buflen > i * BLAKE2B_BLOCKBYTES ) { size_t left = S->buflen - i * BLAKE2B_BLOCKBYTES; if( left > BLAKE2B_BLOCKBYTES ) left = BLAKE2B_BLOCKBYTES; blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, left ); } blake2b_final( S->S[i], hash[i], BLAKE2B_OUTBYTES ); } for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2b_update( S->R, hash[i], BLAKE2B_OUTBYTES ); return blake2b_final( S->R, out, outlen ); } int blake2bp( uint8_t *out, const void *in, const void *key, size_t outlen, size_t inlen, size_t keylen ) { uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES]; blake2b_state S[PARALLELISM_DEGREE][1]; blake2b_state FS[1]; /* Verify parameters */ if ( NULL == in && inlen > 0 ) return -1; if ( NULL == out ) return -1; if ( NULL == key && keylen > 0) return -1; if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1; if( keylen > BLAKE2B_KEYBYTES ) return -1; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2bp_init_leaf( S[i], ( uint8_t ) outlen, ( uint8_t ) keylen, i ) < 0 ) return -1; S[PARALLELISM_DEGREE - 1]->last_node = 1; // mark last node if( keylen > 0 ) { uint8_t block[BLAKE2B_BLOCKBYTES]; memset( block, 0, BLAKE2B_BLOCKBYTES ); memcpy( block, key, keylen ); for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2b_update( S[i], block, BLAKE2B_BLOCKBYTES ); secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */ } #if defined(_OPENMP) omp_set_num_threads(PARALLELISM_DEGREE); #pragma omp parallel shared(S,hash) #else for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ ) #endif { #if defined(_OPENMP) size_t id__ = ( size_t ) omp_get_thread_num(); #endif size_t inlen__ = inlen; const uint8_t *in__ = ( const uint8_t * )in; in__ += id__ * BLAKE2B_BLOCKBYTES; while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES ) { blake2b_update( S[id__], in__, BLAKE2B_BLOCKBYTES ); in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES; inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES; } if( inlen__ > id__ * BLAKE2B_BLOCKBYTES ) { const size_t left = inlen__ - id__ * BLAKE2B_BLOCKBYTES; const size_t len = left <= BLAKE2B_BLOCKBYTES ? left : BLAKE2B_BLOCKBYTES; blake2b_update( S[id__], in__, len ); } blake2b_final( S[id__], hash[id__], BLAKE2B_OUTBYTES ); } if( blake2bp_init_root( FS, ( uint8_t ) outlen, ( uint8_t ) keylen ) < 0 ) return -1; FS->last_node = 1; // Mark as last node for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2b_update( FS, hash[i], BLAKE2B_OUTBYTES ); return blake2b_final( FS, out, outlen ); }
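/* Minimal usage sketch of the one-shot blake2bp() API defined above, using the
   signature from this file. The message is a placeholder and error handling is
   reduced to a single check. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include "blake2.h"

int main(void)
{
  const char msg[] = "hello, parallel hashing";
  uint8_t digest[BLAKE2B_OUTBYTES];

  /* unkeyed hash: key = NULL, keylen = 0 */
  if( blake2bp( digest, msg, NULL, BLAKE2B_OUTBYTES, strlen(msg), 0 ) < 0 )
    return 1;

  for( size_t i = 0; i < BLAKE2B_OUTBYTES; ++i ) printf( "%02x", digest[i] );
  printf( "\n" );
  return 0;
}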
functions.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include "functions.h" //compute a*b mod p safely unsigned int modprod(unsigned int a, unsigned int b, unsigned int p) { unsigned int za = a; unsigned int ab = 0; while (b > 0) { if (b%2 == 1) ab = (ab + za) % p; za = (2 * za) % p; b /= 2; } return ab; } //compute a^b mod p safely unsigned int modExp(unsigned int a, unsigned int b, unsigned int p) { unsigned int z = a; unsigned int aExpb = 1; while (b > 0) { if (b%2 == 1) aExpb = modprod(aExpb, z, p); z = modprod(z, z, p); b /= 2; } return aExpb; } //returns either 0 or 1 randomly unsigned int randomBit() { return rand()%2; } //returns a random integer which is between 2^{n-1} and 2^{n} unsigned int randXbitInt(unsigned int n) { unsigned int r = 1; for (unsigned int i=0; i<n-1; i++) { r = r*2 + randomBit(); } return r; } //tests for primality and return 1 if N is probably prime and 0 if N is composite unsigned int isProbablyPrime(unsigned int N) { if (N%2==0) return 0; //not interested in even numbers (including 2) unsigned int NsmallPrimes = 168; unsigned int smallPrimeList[168] = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997}; //before using a probablistic primality check, check directly using the small primes list for (unsigned int n=1;n<NsmallPrimes;n++) { if (N==smallPrimeList[n]) return 1; //true if (N%smallPrimeList[n]==0) return 0; //false } //if we're testing a large number switch to Miller-Rabin primality test unsigned int r = 0; unsigned int d = N-1; while (d%2 == 0) { d /= 2; r += 1; } for (unsigned int n=0;n<NsmallPrimes;n++) { unsigned int k = smallPrimeList[n]; unsigned int x = modExp(k,d,N); if ((x==1) || (x==N-1)) continue; for (unsigned int i=1;i<r-1;i++) { x = modprod(x,x,N); if (x == 1) return 0; //false if (x == N-1) break; } // see whether we left the loop becasue x==N-1 if (x == N-1) continue; return 0; //false } return 1; //true } //Finds a generator of Z_p using the assumption that p=2*q+1 unsigned int findGenerator(unsigned int p) { unsigned int g; unsigned int q = (p-1)/2; do { //make a random number 1<= g < p g = randXbitInt(32)%p; //could also have passed n to findGenerator } while (g==0 || (modExp(g,q,p)==1) || (modExp(g,2,p)==1)); return g; } void setupElGamal(unsigned int n, unsigned int *p, unsigned int *g, unsigned int *h, unsigned int *x) { /* Use isProbablyPrime and randomXbitInt to find a new random n-bit prime number which satisfies p=2*q+1 where q is also prime */ unsigned int q; do { *p = randXbitInt(n); q = (*p-1)/2; } while (!isProbablyPrime(*p) || !isProbablyPrime(q)); /* Use the fact that p=2*q+1 to quickly find a generator */ *g = findGenerator(*p); //pick a secret key, x *x = randXbitInt(n)%(*p); //compute h *h = modExp(*g,*x,*p); printf("ElGamal Setup successful.\n"); 
printf("p = %u. \n", *p); printf("g = %u is a generator of Z_%u \n", *g, *p); printf("Secret key: x = %u \n", *x); printf("h = g^x = %u\n", *h); printf("\n"); } void ElGamalEncrypt(unsigned int *m, unsigned int *a, unsigned int Nints, unsigned int p, unsigned int g, unsigned int h) { /* Q2.1 Parallelize this function with OpenMP */ #pragma omp parallel for for (unsigned int i=0; i<Nints;i++) { //pick y in Z_p randomly unsigned int y; do { y = randXbitInt(32)%p; } while (y==0); //dont allow y=0 //compute a = g^y a[i] = modExp(g,y,p); //compute s = h^y unsigned int s = modExp(h,y,p); //encrypt m by multiplying with s m[i] = modprod(m[i],s,p); } } void ElGamalDecrypt(unsigned int *m, unsigned int *a, unsigned int Nints, unsigned int p, unsigned int x) { /* Q2.1 Parallelize this function with OpenMP */ #pragma omp parallel for for (unsigned int i=0; i<Nints;i++) { //compute s = a^x unsigned int s = modExp(a[i],x,p); //compute s^{-1} = s^{p-2} unsigned int invS = modExp(s,p-2,p); //decrypt message by multplying by invS m[i] = modprod(m[i],invS,p); } } //Pad the end of string so its length is divisible by Nchars // Assume there is enough allocated storage for the padded string void padString(unsigned char* string, unsigned int charsPerInt) { /* Q1.2 Complete this function */ if(strlen(string) % charsPerInt != 0){ int initlen = strlen(string); int i = 1; string[initlen] = ' '; while((initlen+i) % charsPerInt != 0){ strcat(string, " "); i++; } strcat(string, "\00"); } } void convertStringToZ(unsigned char *string, unsigned int Nchars, unsigned int *Z, unsigned int Nints) { /* Q1.3 Complete this function */ /* Q2.2 Parallelize this function with OpenMP */ unsigned int cpi = Nchars/Nints; #pragma omp parallel for for(int i = 0; i < Nints; i++){ Z[i] = 0; for(int j = 0; j < cpi; j++){ Z[i] += (unsigned int) string[cpi*i+j] << (8*(cpi-j-1)); } //printf("%u || %u = %u\n", (unsigned int) string[cpi*i], (unsigned int) string[cpi*i+1], Z[i]); } } void convertZToString(unsigned int *Z, unsigned int Nints, unsigned char *string, unsigned int Nchars) { /* Q1.4 Complete this function */ /* Q2.2 Parallelize this function with OpenMP */ unsigned int cpi = Nchars/Nints; #pragma omp parallel for for(int i = 0; i < Nints; i++){ for(int j = 0; j < cpi; j++){ string[i*cpi+j] = ' '; } for(int j = 0; j < cpi; j++){ string[i*cpi+j] = (Z[i] >> (8*(cpi-j-1))) & 0xFF; } } }
ft_ao.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Fourier transformed AO pair * \int e^{-i Gv \cdot r} i(r) * j(r) dr^3 * * eval_gz, b, gxyz, gs: * - when eval_gz is GTO_Gv_uniform_orth * > b (reciprocal vectors) is diagonal 3x3 matrix * > Gv k-space grids = dot(b.T,gxyz) * > gxyz[3,nGv] = (kx[:nGv], ky[:nGv], kz[:nGv]) * > gs[3]: The number of G-vectors along each direction (nGv=gs[0]*gs[1]*gs[2]). * - when eval_gz is GTO_Gv_uniform_nonorth * > b is 3x3 matrix = 2\pi * scipy.linalg.inv(cell.lattice_vectors).T * > Gv k-space grids = dot(b.T,gxyz) * > gxyz[3,nGv] = (kx[:nGv], ky[:nGv], kz[:nGv]) * > gs[3]: The number of *positive* G-vectors along each direction. * - when eval_gz is GTO_Gv_general * only Gv is needed * - when eval_gz is GTO_Gv_nonuniform_orth * > b is the basic G value for each cartesian component * Gx = b[:gs[0]] * Gy = b[gs[0]:gs[0]+gs[1]] * Gz = b[gs[0]+gs[1]:] * > gs[3]: Number of basic G values along each direction. * > gxyz[3,nGv] are used to index the basic G value * > Gv is not used */ #include <stdlib.h> #include <string.h> #include <math.h> #include <assert.h> #include <complex.h> #include "config.h" #include "cint.h" #include "gto/ft_ao.h" #define SQRTPI 1.7724538509055160272981674833411451 #define EXPCUTOFF 100 #define NCTRMAX 72 double CINTsquare_dist(const double *r1, const double *r2); double CINTcommon_fac_sp(int l); /* * Pyscf-1.5 (and older) use libcint function CINTinit_int1e_EnvVars and * CINTg1e_index_xyz. It's unsafe since the CINTEnvVars type was redefined * in ft_ao.h. Copy the contents of CINTinit_int1e_EnvVars and * CINTg1e_index_xyz here. 
*/ #define IINC 0 #define JINC 1 #define GSHIFT 4 #define POS_E1 5 #define RYS_ROOTS 6 #define TENSOR 7 void GTO_ft_init1e_envs(CINTEnvVars *envs, int *ng, int *shls, int *atm, int natm, int *bas, int nbas, double *env) { envs->natm = natm; envs->nbas = nbas; envs->atm = atm; envs->bas = bas; envs->env = env; envs->shls = shls; const int i_sh = shls[0]; const int j_sh = shls[1]; envs->i_l = bas(ANG_OF, i_sh); envs->j_l = bas(ANG_OF, j_sh); envs->x_ctr[0] = bas(NCTR_OF, i_sh); envs->x_ctr[1] = bas(NCTR_OF, j_sh); envs->nfi = (envs->i_l+1)*(envs->i_l+2)/2; envs->nfj = (envs->j_l+1)*(envs->j_l+2)/2; envs->nf = envs->nfi * envs->nfj; envs->common_factor = 1; envs->gbits = ng[GSHIFT]; envs->ncomp_e1 = ng[POS_E1]; envs->ncomp_tensor = ng[TENSOR]; envs->li_ceil = envs->i_l + ng[IINC]; envs->lj_ceil = envs->j_l + ng[JINC]; if (ng[RYS_ROOTS] > 0) { envs->nrys_roots = ng[RYS_ROOTS]; } else { envs->nrys_roots = (envs->li_ceil + envs->lj_ceil)/2 + 1; } envs->ri = env + atm(PTR_COORD, bas(ATOM_OF, i_sh)); envs->rj = env + atm(PTR_COORD, bas(ATOM_OF, j_sh)); int dli, dlj; if (envs->li_ceil < envs->lj_ceil) { dli = envs->li_ceil + 1; dlj = envs->li_ceil + envs->lj_ceil + 1; } else { dli = envs->li_ceil + envs->lj_ceil + 1; dlj = envs->lj_ceil + 1; } envs->g_stride_i = 1; envs->g_stride_j = dli; envs->g_size = dli * dlj; envs->lk_ceil = 1; envs->ll_ceil = 1; envs->g_stride_k = 0; envs->g_stride_l = 0; } #define CART_MAX 128 // > (ANG_MAX*(ANG_MAX+1)/2) void CINTcart_comp(int *nx, int *ny, int *nz, const int lmax); static void _g2c_index_xyz(int *idx, const CINTEnvVars *envs) { int i_l = envs->i_l; int j_l = envs->j_l; int nfi = envs->nfi; int nfj = envs->nfj; int di = envs->g_stride_i; int dj = envs->g_stride_j; int i, j, n; int ofx, ofjx; int ofy, ofjy; int ofz, ofjz; int i_nx[CART_MAX], i_ny[CART_MAX], i_nz[CART_MAX]; int j_nx[CART_MAX], j_ny[CART_MAX], j_nz[CART_MAX]; CINTcart_comp(i_nx, i_ny, i_nz, i_l); CINTcart_comp(j_nx, j_ny, j_nz, j_l); ofx = 0; ofy = envs->g_size; ofz = envs->g_size * 2; n = 0; for (j = 0; j < nfj; j++) { ofjx = ofx + dj * j_nx[j]; ofjy = ofy + dj * j_ny[j]; ofjz = ofz + dj * j_nz[j]; for (i = 0; i < nfi; i++) { idx[n+0] = ofjx + di * i_nx[i]; idx[n+1] = ofjy + di * i_ny[i]; idx[n+2] = ofjz + di * i_nz[i]; n += 3; } } } static const int _LEN_CART[] = { 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 66, 78, 91, 105, 120, 136 }; static const int _CUM_LEN_CART[] = { 1, 4, 10, 20, 35, 56, 84, 120, 165, 220, 286, 364, 455, 560, 680, 816, }; /* * WHEREX_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if x > 0] * WHEREY_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if y > 0] * WHEREZ_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if z > 0] */ static const int _UPIDY[] = { 1, 3, 4, 6, 7, 8, 10, 11, 12, 13, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 91, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103, 105,106,107,108,109,110,111,112,113,114,115,116,117,118, 120,121,122,123,124,125,126,127,128,129,130,131,132,133,134, }; static const int _UPIDZ[] = { 2, 4, 5, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 19, 20, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34, 35, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 
89, 90, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,104, 106,107,108,109,110,111,112,113,114,115,116,117,118,119, 121,122,123,124,125,126,127,128,129,130,131,132,133,134,135, }; /* * _DOWN_XYZ, _DOWN_XYZ_ORDER, _DOWN1, _DOWN2 labels the index in the 1D * recursive relation f_{i+1} = i/2a * f_{i-1} + X * f_{i} * _DOWN_XYZ_ORDER i in i/2a * _DOWN2 index of f_{i-1} * _DOWN_XYZ index of X * _DOWN1 index of f_{i} */ static const int _DOWN1[] = { -1, 0, 0, 0, 0, 1, 2, 1, 2, 2, 0, 0, 0, 3, 4, 5, 3, 3, 5, 5, 0, 0, 0, 3, 2, 5, 6, 7, 8, 9, 6, 6, 8, 9, 9, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 11, 12, 13, 14, 10, 10, 12, 13, 14, 14, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 16, 17, 18, 19, 20, 15, 15, 17, 18, 19, 20, 20, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 22, 23, 24, 25, 26, 27, 21, 21, 23, 24, 25, 26, 27, 27, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 29, 30, 31, 32, 33, 34, 35, 28, 28, 30, 31, 32, 33, 34, 35, 35, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 36, 36, 38, 39, 40, 41, 42, 43, 44, 44, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 45, 45, 47, 48, 49, 50, 51, 52, 53, 54, 54, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 55, 55, 57, 58, 59, 60, 61, 62, 63, 64, 65, 65, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 66, 66, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 77, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 78, 78, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 90, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 66, 80, 81, 82, 83, 84, 85, 86, 87, 88, 77, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 91, 91, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 104, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 66, 80, 81, 82, 83, 84, 85, 86, 87, 88, 77, 90, 91, 78, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 90, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 105, 105, 107, 108, 109, 110, 111, 112, 
113, 114, 115, 116, 117, 118, 119, 119, }; static const int _DOWN2[] = { -1, -1, -1, -1, 0, -1, -1, 0, -1, 0, 0, -1, -1, -1, -1, -1, 1, -1, -1, 2, 0, -1, -1, 3, -1, 5, -1, -1, -1, -1, 3, -1, 5, -1, 5, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, -1, -1, -1, -1, -1, 6, -1, 8, 9, -1, 9, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, -1, -1, -1, -1, -1, -1, 10, -1, 12, 13, 14, -1, 14, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, -1, -1, -1, -1, -1, -1, -1, 15, -1, 17, 18, 19, 20, -1, 20, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, -1, -1, -1, -1, -1, -1, -1, -1, 21, -1, 23, 24, 25, 26, 27, -1, 27, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, -1, -1, -1, -1, -1, -1, -1, -1, -1, 28, -1, 30, 31, 32, 33, 34, 35, -1, 35, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 36, -1, 38, 39, 40, 41, 42, 43, 44, -1, 44, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 45, -1, 47, 48, 49, 50, 51, 52, 53, 54, -1, 54, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 55, -1, 57, 58, 59, 60, 61, 62, 63, 64, 65, -1, 65, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, -1, 77, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, -1, 90, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, -1, 90, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, -1, 90, 91, -1, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, -1, 104, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 91, -1, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, -1, 104, }; static const int _DOWN_XYZ[] = { 2, 0, 1, 2, 0, 0, 0, 1, 1, 2, 0, 1, 2, 0, 0, 0, 1, 2, 1, 2, 0, 1, 2, 0, 1, 0, 0, 0, 0, 0, 1, 2, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 
1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, }; static const int _DOWN_XYZ_ORDER[] = { 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 2, 0, 0, 0, 0, 0, 2, 0, 0, 2, 3, 0, 0, 1, 0, 1, 0, 0, 0, 0, 3, 0, 1, 0, 3, 4, 0, 0, 2, 0, 2, 1, 0, 0, 1, 0, 0, 0, 0, 0, 4, 0, 2, 1, 0, 4, 5, 0, 0, 3, 0, 3, 2, 0, 0, 2, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 5, 0, 3, 2, 1, 0, 5, 6, 0, 0, 4, 0, 4, 3, 0, 0, 3, 2, 0, 2, 0, 2, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 6, 0, 4, 3, 2, 1, 0, 6, 7, 0, 0, 5, 0, 5, 4, 0, 0, 4, 3, 0, 3, 0, 3, 2, 0, 2, 2, 0, 2, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 5, 4, 3, 2, 1, 0, 7, 8, 0, 0, 6, 0, 6, 5, 0, 0, 5, 4, 0, 4, 0, 4, 3, 0, 3, 3, 0, 3, 2, 0, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 6, 5, 4, 3, 2, 1, 0, 8, 9, 0, 0, 7, 0, 7, 6, 0, 0, 6, 5, 0, 5, 0, 5, 4, 0, 4, 4, 0, 4, 3, 0, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 7, 6, 5, 4, 3, 2, 1, 0, 9, 10, 0, 0, 8, 0, 8, 7, 0, 0, 7, 6, 0, 6, 0, 6, 5, 0, 5, 5, 0, 5, 4, 0, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 8, 7, 6, 5, 4, 3, 2, 1, 0, 10, 11, 0, 0, 9, 0, 9, 8, 0, 0, 8, 7, 0, 7, 0, 7, 6, 0, 6, 6, 0, 6, 5, 0, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 11, 12, 0, 0, 10, 0, 10, 9, 0, 0, 9, 8, 0, 8, 0, 8, 7, 0, 7, 7, 0, 7, 6, 0, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 12, 13, 0, 0, 11, 0, 11, 10, 0, 0, 10, 9, 0, 9, 0, 9, 8, 0, 8, 8, 0, 8, 7, 0, 7, 7, 7, 0, 7, 6, 0, 6, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 0, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 13, 14, 0, 0, 12, 0, 12, 11, 0, 0, 11, 10, 0, 10, 0, 10, 9, 0, 9, 9, 0, 9, 8, 0, 8, 8, 8, 0, 8, 7, 0, 7, 7, 7, 7, 0, 7, 6, 0, 6, 6, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 0, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 14, }; #define WHEREX_IF_L_INC1(i) i #define WHEREY_IF_L_INC1(i) _UPIDY[i] #define WHEREZ_IF_L_INC1(i) _UPIDZ[i] #define STARTX_IF_L_DEC1(i) 0 #define STARTY_IF_L_DEC1(i) ((i<2)?0:_LEN_CART[i-2]) #define STARTZ_IF_L_DEC1(i) (_LEN_CART[i-1]-1) #define ADDR_IF_L_DEC1(l,m) _DOWN1[_CUM_LEN_CART[l-1]+m] #define ADDR_IF_L_DEC2(l,m) _DOWN2[_CUM_LEN_CART[l-1]+m] #define DEC1_XYZ(l,m) _DOWN_XYZ[_CUM_LEN_CART[l-1]+m] #define DEC1_XYZ_ORDER(l,m) _DOWN_XYZ_ORDER[_CUM_LEN_CART[l-1]+m] static int vrr1d_withGv(double complex *g, double *rijri, double aij, double *Gv, int topl, size_t NGv) { int cumxyz = 1; if (topl == 0) { return cumxyz; } double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; int i, n, m, l; double a2; double complex *p0, *p1, *p2, *dec1, *dec2; double *ka2 = malloc(sizeof(double) * NGv*3); double *kxa2 = ka2; double *kya2 = kxa2 + NGv; double *kza2 = kya2 + NGv; a2 = .5 / aij; for (n = 0; n < NGv; n++) { kxa2[n] = kx[n] * a2; kya2[n] = ky[n] * a2; kza2[n] = kz[n] * a2; } p0 = g + NGv; for (n = 0; n < NGv; n++) { p0[ n] = (rijri[0] - kxa2[n]*_Complex_I) * g[n]; p0[NGv +n] = (rijri[1] - kya2[n]*_Complex_I) * g[n]; p0[NGv*2+n] = (rijri[2] - kza2[n]*_Complex_I) * g[n]; } cumxyz += 3; for (l = 1; l < topl; l++) { p0 = g + cumxyz * NGv; dec1 = p0 - _LEN_CART[l ] * NGv; dec2 = dec1 - _LEN_CART[l-1] * NGv; for (i = 0; i < _LEN_CART[l+1]; i++) { m = DEC1_XYZ(l+1,i); kxa2 = ka2 + m * NGv; p1 = dec1 + ADDR_IF_L_DEC1(l+1,i) * NGv; p2 = dec2 + ADDR_IF_L_DEC2(l+1,i) * NGv; if (ADDR_IF_L_DEC2(l+1,i) < 0) { for (n = 0; n < NGv; n++) { p0[n] = (rijri[m]-kxa2[n]*_Complex_I)*p1[n]; } } else { a2 = .5/aij * DEC1_XYZ_ORDER(l+1,i); for (n = 0; n < NGv; n++) { p0[n] = a2*p2[n] + (rijri[m]-kxa2[n]*_Complex_I)*p1[n]; } } p0 += NGv; } cumxyz += _LEN_CART[l+1]; } free(ka2); return cumxyz; } /* * if li = 3, lj = 1 * (10 + X*00 -> 01): * gs + X*fs -> fp */ static void vrr2d_ket_inc1_withGv(double complex *out, const double complex *g, double *rirj, int li, int lj, size_t NGv) { if (lj == 0) { memcpy(out, g, sizeof(double complex)*_LEN_CART[li]*NGv); return; } const int row_10 = _LEN_CART[li+1]; const int row_00 = _LEN_CART[li ]; const int col_00 = _LEN_CART[lj-1]; const double complex *g00 = g; const double complex *g10 = g + row_00*col_00*NGv; int i, j, n; const double complex *p00, *p10; double complex *p01 = out; for (j = STARTX_IF_L_DEC1(lj); j < 
_LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i)) * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[0] * p00[n]; } p01 += NGv; } } for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i)) * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[1] * p00[n]; } p01 += NGv; } } j = STARTZ_IF_L_DEC1(lj); if (j < _LEN_CART[lj-1]) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i)) * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[2] * p00[n]; } p01 += NGv; } } } /* * transpose i, j when storing into out */ static void vrr2d_inc1_swapij(double complex *out, const double complex *g, double *rirj, int li, int lj, size_t NGv) { if (lj == 0) { memcpy(out, g, sizeof(double complex)*_LEN_CART[li]*NGv); return; } const int row_01 = _LEN_CART[lj]; const int row_10 = _LEN_CART[li+1]; const int row_00 = _LEN_CART[li ]; const int col_00 = _LEN_CART[lj-1]; const double complex *g00 = g; const double complex *g10 = g + row_00*col_00*NGv; int i, j, n; const double complex *p00, *p10; double complex *p01 = out; for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i)) * NGv; p01 = out + i*row_01 * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[0] * p00[n]; } } out += NGv; } for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i)) * NGv; p01 = out + i*row_01 * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[1] * p00[n]; } } out += NGv; } j = STARTZ_IF_L_DEC1(lj); if (j < _LEN_CART[lj-1]) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i)) * NGv; p01 = out + i*row_01 * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[2] * p00[n]; } } } } static void vrr2d_withGv(double complex *out, double complex *g, double complex *gbuf2, const int li, const int lj, const double *ri, const double *rj, size_t NGv) { const int nmax = li + lj; double complex *g00, *g01, *gswap, *pg00, *pg01; int row_01, col_01, row_00, col_00; int i, j; double rirj[3]; rirj[0] = ri[0] - rj[0]; rirj[1] = ri[1] - rj[1]; rirj[2] = ri[2] - rj[2]; g00 = gbuf2; g01 = g; for (j = 1; j < lj; j++) { gswap = g00; g00 = g01; g01 = gswap; pg00 = g00; pg01 = g01; for (i = li; i <= nmax-j; i++) { vrr2d_ket_inc1_withGv(pg01, pg00, rirj, i, j, NGv); row_01 = _LEN_CART[i]; col_01 = _LEN_CART[j]; row_00 = _LEN_CART[i ]; col_00 = _LEN_CART[j-1]; pg00 += row_00*col_00 * NGv; pg01 += row_01*col_01 * NGv; } } vrr2d_ket_inc1_withGv(out, g01, rirj, li, lj, NGv); } /* (0,li+lj) => (li,lj) */ static void hrr2d_withGv(double complex *out, double complex *g, double complex *gbuf2, const int li, const int lj, const double *ri, const double *rj, size_t NGv) { const int nmax = li + lj; double complex *g00, *g01, *gswap, *pg00, *pg01; int row_01, col_01, row_00, col_00; int i, j; double rjri[3]; rjri[0] = rj[0] - ri[0]; rjri[1] = rj[1] - ri[1]; rjri[2] = rj[2] - ri[2]; g00 = gbuf2; g01 = g; for (i = 1; i < li; i++) { gswap = g00; g00 = g01; g01 = gswap; pg00 = g00; pg01 = g01; for (j = lj; j <= nmax-i; j++) { vrr2d_ket_inc1_withGv(pg01, pg00, rjri, j, i, NGv); row_01 = _LEN_CART[j]; col_01 = _LEN_CART[i]; row_00 = _LEN_CART[j ]; 
col_00 = _LEN_CART[i-1]; pg00 += row_00*col_00 * NGv; pg01 += row_01*col_01 * NGv; } } vrr2d_inc1_swapij(out, g01, rjri, lj, li, NGv); } /* * Recursive relation */ static void aopair_rr_igtj_early(double complex *g, double ai, double aj, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int topl = envs->li_ceil + envs->lj_ceil; const double aij = ai + aj; const double *ri = envs->ri; const double *rj = envs->rj; double rij[3], rijri[3]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijri[0] = rij[0] - ri[0]; rijri[1] = rij[1] - ri[1]; rijri[2] = rij[2] - ri[2]; (*eval_gz)(g, aij, rij, fac, Gv, b, gxyz, gs, NGv); vrr1d_withGv(g, rijri, aij, Gv, topl, NGv); } static void aopair_rr_iltj_early(double complex *g, double ai, double aj, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int topl = envs->li_ceil + envs->lj_ceil; const double aij = ai + aj; const double *ri = envs->ri; const double *rj = envs->rj; double rij[3], rijrj[3]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijrj[0] = rij[0] - rj[0]; rijrj[1] = rij[1] - rj[1]; rijrj[2] = rij[2] - rj[2]; (*eval_gz)(g, aij, rij, fac, Gv, b, gxyz, gs, NGv); vrr1d_withGv(g, rijrj, aij, Gv, topl, NGv); } static void aopair_rr_igtj_lazy(double complex *g, double ai, double aj, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int nmax = envs->li_ceil + envs->lj_ceil; const int lj = envs->lj_ceil; const int dj = envs->g_stride_j; const double aij = ai + aj; const double a2 = .5 / aij; const double *ri = envs->ri; const double *rj = envs->rj; double rij[3], rirj[3], rijri[3]; double complex *gx = g; double complex *gy = gx + envs->g_size * NGv; double complex *gz = gy + envs->g_size * NGv; double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; size_t off0, off1, off2; int i, j, n, ptr; double ia2; rirj[0] = ri[0] - rj[0]; rirj[1] = ri[1] - rj[1]; rirj[2] = ri[2] - rj[2]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijri[0] = rij[0] - ri[0]; rijri[1] = rij[1] - ri[1]; rijri[2] = rij[2] - ri[2]; for (n = 0; n < NGv; n++) { gx[n] = 1; gy[n] = 1; } (*eval_gz)(gz, aij, rij, fac, Gv, b, gxyz, gs, NGv); if (nmax > 0) { for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[NGv+n] = (rijri[0] - kx[n]*a2*_Complex_I) * gx[n]; gy[NGv+n] = (rijri[1] - ky[n]*a2*_Complex_I) * gy[n]; gz[NGv+n] = (rijri[2] - kz[n]*a2*_Complex_I) * gz[n]; } } } for (i = 1; i < nmax; i++) { off0 = (i-1) * NGv; off1 = i * NGv; off2 = (i+1) * NGv; ia2 = i * a2; for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off2+n] = ia2 * gx[off0+n] + (rijri[0] - kx[n]*a2*_Complex_I) * gx[off1+n]; gy[off2+n] = ia2 * gy[off0+n] + (rijri[1] - ky[n]*a2*_Complex_I) * gy[off1+n]; gz[off2+n] = ia2 * gz[off0+n] + (rijri[2] - kz[n]*a2*_Complex_I) * gz[off1+n]; } } } for (j = 1; j <= lj; j++) { ptr = dj * j; for (i = ptr; i <= ptr + nmax - j; i++) { off0 = i * NGv - dj * NGv; // [i, j-1] off1 = (i+1) * NGv - dj * NGv; // [i+1,j-1] off2 = i * NGv; // [i, j ] for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off2+n] = gx[off1+n] + rirj[0] * gx[off0+n]; gy[off2+n] = gy[off1+n] + rirj[1] * gy[off0+n]; gz[off2+n] = gz[off1+n] + rirj[2] * gz[off0+n]; } } } 
} } static void aopair_rr_iltj_lazy(double complex *g, double ai, double aj, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int nmax = envs->li_ceil + envs->lj_ceil; const int li = envs->li_ceil; const int dj = envs->g_stride_j; const double aij = ai + aj; const double a2 = .5 / aij; const double *ri = envs->ri; const double *rj = envs->rj; double rij[3], rirj[3], rijrj[3]; double complex *gx = g; double complex *gy = gx + envs->g_size * NGv; double complex *gz = gy + envs->g_size * NGv; double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; size_t off0, off1, off2; int i, j, n; double ia2; rirj[0] = rj[0] - ri[0]; rirj[1] = rj[1] - ri[1]; rirj[2] = rj[2] - ri[2]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijrj[0] = rij[0] - rj[0]; rijrj[1] = rij[1] - rj[1]; rijrj[2] = rij[2] - rj[2]; for (n = 0; n < NGv; n++) { gx[n] = 1; gy[n] = 1; } (*eval_gz)(gz, aij, rij, fac, Gv, b, gxyz, gs, NGv); if (nmax > 0) { off0 = dj * NGv; for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off0+n] = (rijrj[0] - kx[n]*a2*_Complex_I) * gx[n]; gy[off0+n] = (rijrj[1] - ky[n]*a2*_Complex_I) * gy[n]; gz[off0+n] = (rijrj[2] - kz[n]*a2*_Complex_I) * gz[n]; } } } for (i = 1; i < nmax; i++) { off0 = (i-1) * dj * NGv; off1 = i * dj * NGv; off2 = (i+1) * dj * NGv; ia2 = i * a2; for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off2+n] = ia2 * gx[off0+n] + (rijrj[0] - kx[n]*a2*_Complex_I) * gx[off1+n]; gy[off2+n] = ia2 * gy[off0+n] + (rijrj[1] - ky[n]*a2*_Complex_I) * gy[off1+n]; gz[off2+n] = ia2 * gz[off0+n] + (rijrj[2] - kz[n]*a2*_Complex_I) * gz[off1+n]; } } } for (i = 1; i <= li; i++) { for (j = 0; j <= nmax - i; j++) { off0 = (i-1) * NGv + j * dj * NGv; // [i-1,j ] off1 = (i-1) * NGv + (j+1) * dj * NGv; // [i-1,j+1] off2 = i * NGv + j * dj * NGv; // [i ,j ] for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off2+n] = gx[off1+n] + rirj[0] * gx[off0+n]; gy[off2+n] = gy[off1+n] + rirj[1] * gy[off0+n]; gz[off2+n] = gz[off1+n] + rirj[2] * gz[off0+n]; } } } } } static void inner_prod(double complex *g, double complex *gout, int *idx, const CINTEnvVars *envs, double *Gv, size_t NGv, int empty) { int ix, iy, iz, n, k; double complex *gz = g + envs->g_size * NGv * 2; if (empty) { for (n = 0; n < envs->nf; n++) { ix = idx[n*3+0]; iy = idx[n*3+1]; iz = idx[n*3+2]; for (k = 0; k < NGv; k++) { if (gz[k] != 0) { gout[n*NGv+k] = g[ix*NGv+k] * g[iy*NGv+k] * g[iz*NGv+k]; } else { gout[n*NGv+k] = 0; } } } } else { for (n = 0; n < envs->nf; n++) { ix = idx[n*3+0]; iy = idx[n*3+1]; iz = idx[n*3+2]; for (k = 0; k < NGv; k++) { if (gz[k] != 0) { gout[n*NGv+k] += g[ix*NGv+k] * g[iy*NGv+k] * g[iz*NGv+k]; } } } } } static void prim_to_ctr(double complex *gc, const size_t nf, double complex *gp, const int nprim, const int nctr, const double *coeff, int empty) { size_t n, i; double c; if (empty) { for (n = 0; n < nctr; n++) { c = coeff[nprim*n]; for (i = 0; i < nf; i++) { gc[i] = gp[i] * c; } gc += nf; } } else { for (n = 0; n < nctr; n++) { c = coeff[nprim*n]; if (c != 0) { for (i = 0; i < nf; i++) { gc[i] += gp[i] * c; } } gc += nf; } } } static void transpose(double complex *out, double complex *in, int nf, int comp, size_t NGv) { size_t n, k, ic; double complex *pin; for (ic = 0; ic < comp; ic++) { for (n = 0; n < nf; n++) { pin = in + (n*comp+ic) * NGv; for (k = 0; k < NGv; k++) { out[n*NGv+k] = pin[k]; } } out += nf * NGv; } } static const int _GBUFSIZE[] = { 1, 4, 10, 10, 20, 
48, 20, 35, 75, 150, 35, 56, 108, 216, 384, 56, 84, 147, 294, 510, 850, 84, 120, 192, 384, 654, 1090, 1640, 120, 165, 243, 486, 816, 1360, 2040, 3030 }; #define bufsize(i,j) _GBUFSIZE[((i>=j) ? (i*(i+1)/2+j) : (j*(j+1)/2+i))] int GTO_aopair_early_contract(double complex *out, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int *shls = envs->shls; const int *bas = envs->bas; const double *env = envs->env; const int i_sh = shls[0]; const int j_sh = shls[1]; const int i_l = envs->i_l; const int j_l = envs->j_l; const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int i_prim = bas(NPRIM_OF, i_sh); const int j_prim = bas(NPRIM_OF, j_sh); const int nf = envs->nf; const double *ri = envs->ri; const double *rj = envs->rj; const double *ai = env + bas(PTR_EXP, i_sh); const double *aj = env + bas(PTR_EXP, j_sh); const double *ci = env + bas(PTR_COEFF, i_sh); const double *cj = env + bas(PTR_COEFF, j_sh); double fac1i, fac1j; double aij, dij, eij; int ip, jp, n; int empty[2] = {1, 1}; int *jempty = empty + 0; int *iempty = empty + 1; const size_t len1 = bufsize(i_l,j_l) * NGv; const size_t leni = len1 * i_ctr; const size_t lenj = len1 * i_ctr * j_ctr; double complex *gctrj = malloc(sizeof(double complex)*(lenj+leni+len1)); double complex *g = gctrj + lenj; double complex *gctri, *g1d; if (j_ctr == 1) { gctri = gctrj; iempty = jempty; } else { gctri = g; g += leni; } g1d = g; void (*aopair_rr)(); int offset_g1d; if (i_l >= j_l) { aopair_rr = aopair_rr_igtj_early; offset_g1d = _CUM_LEN_CART[i_l] - _LEN_CART[i_l]; } else { aopair_rr = aopair_rr_iltj_early; offset_g1d = _CUM_LEN_CART[j_l] - _LEN_CART[j_l]; } int len_g1d = _CUM_LEN_CART[i_l+j_l] - offset_g1d; double rrij = CINTsquare_dist(ri, rj); double fac1 = SQRTPI * M_PI * CINTcommon_fac_sp(i_l) * CINTcommon_fac_sp(j_l); *jempty = 1; for (jp = 0; jp < j_prim; jp++) { if (j_ctr == 1) { fac1j = fac1 * cj[jp]; } else { fac1j = fac1; *iempty = 1; } for (ip = 0; ip < i_prim; ip++) { aij = ai[ip] + aj[jp]; eij = (ai[ip] * aj[jp] / aij) * rrij; if (eij > EXPCUTOFF) { continue; } dij = exp(-eij) / (aij * sqrt(aij)); fac1i = fac1j * dij; (*aopair_rr)(g, ai[ip], aj[jp], envs, eval_gz, fac*fac1i, Gv, b, gxyz, gs, NGv); prim_to_ctr(gctri, len_g1d*NGv, g1d+offset_g1d*NGv, i_prim, i_ctr, ci+ip, *iempty); *iempty = 0; } if (!*iempty) { if (j_ctr > 1) { prim_to_ctr(gctrj, i_ctr*len_g1d*NGv, gctri, j_prim,j_ctr, cj+jp, *jempty); } *jempty = 0; } } if (!*jempty) { g1d = gctrj; for (n = 0; n < i_ctr*j_ctr; n++) { if (i_l >= j_l) { vrr2d_withGv(out+n*nf*NGv, g1d, gctrj+lenj, envs->li_ceil, envs->lj_ceil, ri, rj, NGv); } else { hrr2d_withGv(out+n*nf*NGv, g1d, gctrj+lenj, envs->li_ceil, envs->lj_ceil, ri, rj, NGv); } g1d += len_g1d * NGv; } } free(gctrj); return !*jempty; } int GTO_aopair_lazy_contract(double complex *gctr, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int *shls = envs->shls; const int *bas = envs->bas; const double *env = envs->env; const int i_sh = shls[0]; const int j_sh = shls[1]; const int i_l = envs->i_l; const int j_l = envs->j_l; const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int i_prim = bas(NPRIM_OF, i_sh); const int j_prim = bas(NPRIM_OF, j_sh); const int n_comp = envs->ncomp_e1 * envs->ncomp_tensor; const int nf = envs->nf; const double *ri = envs->ri; const double *rj = envs->rj; const double *ai = env + bas(PTR_EXP, i_sh); const double *aj = 
env + bas(PTR_EXP, j_sh); const double *ci = env + bas(PTR_COEFF, i_sh); const double *cj = env + bas(PTR_COEFF, j_sh); double fac1i, fac1j; double aij, dij, eij; int ip, jp; int empty[3] = {1, 1, 1}; int *jempty = empty + 0; int *iempty = empty + 1; int *gempty = empty + 2; const size_t len1 = envs->g_size * 3 * (1<<envs->gbits) * NGv; const size_t leng = nf * n_comp * NGv; const size_t leni = nf * i_ctr * n_comp * NGv; size_t lenj = 0; if (n_comp > 1) { lenj = nf * i_ctr * j_ctr * n_comp * NGv; } double complex *g = malloc(sizeof(double complex) * (len1+leng+leni+lenj)); double complex *g1 = g + len1; double complex *gout, *gctri, *gctrj; if (n_comp == 1) { gctrj = gctr; } else { gctrj = g1; g1 += lenj; } if (j_ctr == 1) { gctri = gctrj; iempty = jempty; } else { gctri = g1; g1 += leni; } if (i_ctr == 1) { gout = gctri; gempty = iempty; } else { gout = g1; } void (*aopair_rr)(); if (i_l >= j_l) { aopair_rr = aopair_rr_igtj_lazy; } else { aopair_rr = aopair_rr_iltj_lazy; } int *idx = malloc(sizeof(int) * nf * 3); _g2c_index_xyz(idx, envs); double rrij = CINTsquare_dist(ri, rj); double fac1 = SQRTPI * M_PI * CINTcommon_fac_sp(i_l) * CINTcommon_fac_sp(j_l); *jempty = 1; for (jp = 0; jp < j_prim; jp++) { envs->aj = aj[jp]; if (j_ctr == 1) { fac1j = fac1 * cj[jp]; } else { fac1j = fac1; *iempty = 1; } for (ip = 0; ip < i_prim; ip++) { envs->ai = ai[ip]; aij = ai[ip] + aj[jp]; eij = (ai[ip] * aj[jp] / aij) * rrij; if (eij > EXPCUTOFF) { continue; } dij = exp(-eij) / (aij * sqrt(aij)); if (i_ctr == 1) { fac1i = fac1j * dij * ci[ip]; } else { fac1i = fac1j * dij; } (*aopair_rr)(g, ai[ip], aj[jp], envs, eval_gz, fac*fac1i, Gv, b, gxyz, gs, NGv); (*envs->f_gout)(g, gout, idx, envs, Gv, NGv, *gempty); if (i_ctr > 1) { prim_to_ctr(gctri, nf*n_comp*NGv, gout, i_prim, i_ctr, ci+ip, *iempty); } *iempty = 0; } if (!*iempty) { if (j_ctr > 1) { prim_to_ctr(gctrj, i_ctr*nf*n_comp*NGv, gctri, j_prim, j_ctr, cj+jp, *jempty); } *jempty = 0; } } if (n_comp > 1 && !*jempty) { transpose(gctr, gctrj, nf*i_ctr*j_ctr, n_comp, NGv); } free(g); free(idx); return !*jempty; } void GTO_Gv_general(double complex *out, double aij, double *rij, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; const double cutoff = EXPCUTOFF * aij * 4; int n; double kR, kk; for (n = 0; n < NGv; n++) { kk = kx[n] * kx[n] + ky[n] * ky[n] + kz[n] * kz[n]; if (kk < cutoff) { kR = kx[n] * rij[0] + ky[n] * rij[1] + kz[n] * rij[2]; out[n] = exp(-.25*kk/aij) * fac * (cos(kR) - sin(kR)*_Complex_I); } else { out[n] = 0; } } } /* * Gv = dot(b.T,gxyz) + kpt * kk = dot(Gv, Gv) * kr = dot(rij, Gv) = dot(rij,b.T, gxyz) + dot(rij,kpt) = dot(br, gxyz) + dot(rij,kpt) * out = fac * exp(-.25 * kk / aij) * (cos(kr) - sin(kr) * _Complex_I); * * b: the first 9 elements are 2\pi*inv(a^T), then 3 elements for k_{ij}, * followed by 3*NGv floats for Gbase */ void GTO_Gv_orth(double complex *out, double aij, double *rij, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int nx = gs[0]; const int ny = gs[1]; const int nz = gs[2]; double br[3]; // dot(rij, b) br[0] = rij[0] * b[0]; br[1] = rij[1] * b[4]; br[2] = rij[2] * b[8]; double *kpt = b + 9; double kr[3]; kr[0] = rij[0] * kpt[0]; kr[1] = rij[1] * kpt[1]; kr[2] = rij[2] * kpt[2]; double *Gxbase = b + 12; double *Gybase = Gxbase + nx; double *Gzbase = Gybase + ny; double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; double complex zbuf[nx+ny+nz]; double complex *csx = zbuf; double 
complex *csy = csx + nx; double complex *csz = csy + ny; double kkpool[nx+ny+nz]; double *kkx = kkpool; double *kky = kkx + nx; double *kkz = kky + ny; int *gx = gxyz; int *gy = gx + NGv; int *gz = gy + NGv; const double cutoff = EXPCUTOFF * aij * 4; int n, ix, iy, iz; double Gr; for (n = 0; n < nx+ny+nz; n++) { kkpool[n] = -1; } for (n = 0; n < NGv; n++) { ix = gx[n]; iy = gy[n]; iz = gz[n]; if (kkx[ix] < 0) { Gr = Gxbase[ix] * br[0] + kr[0]; kkx[ix] = .25 * kx[n]*kx[n] / aij; csx[ix] = exp(-kkx[ix]) * (cos(Gr)-sin(Gr)*_Complex_I); } if (kky[iy] < 0) { Gr = Gybase[iy] * br[1] + kr[1]; kky[iy] = .25 * ky[n]*ky[n] / aij; csy[iy] = exp(-kky[iy]) * (cos(Gr)-sin(Gr)*_Complex_I); } if (kkz[iz] < 0) { Gr = Gzbase[iz] * br[2] + kr[2]; kkz[iz] = .25 * kz[n]*kz[n] / aij; csz[iz] = fac * exp(-kkz[iz]) * (cos(Gr)-sin(Gr)*_Complex_I); } if (kkx[ix] + kky[iy] + kkz[iz] < cutoff) { out[n] = csx[ix] * csy[iy] * csz[iz]; } else { out[n] = 0; } } } void GTO_Gv_nonorth(double complex *out, double aij, double *rij, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int nx = gs[0]; const int ny = gs[1]; const int nz = gs[2]; double br[3]; // dot(rij, b) br[0] = rij[0] * b[0]; br[0] += rij[1] * b[1]; br[0] += rij[2] * b[2]; br[1] = rij[0] * b[3]; br[1] += rij[1] * b[4]; br[1] += rij[2] * b[5]; br[2] = rij[0] * b[6]; br[2] += rij[1] * b[7]; br[2] += rij[2] * b[8]; double *kpt = b + 9; double kr[3]; kr[0] = rij[0] * kpt[0]; kr[1] = rij[1] * kpt[1]; kr[2] = rij[2] * kpt[2]; double *Gxbase = b + 12; double *Gybase = Gxbase + nx; double *Gzbase = Gybase + ny; double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; double complex zbuf[nx+ny+nz]; double complex *csx = zbuf; double complex *csy = csx + nx; double complex *csz = csy + ny; char empty[nx+ny+nz]; char *xempty = empty; char *yempty = xempty + nx; char *zempty = yempty + ny; memset(empty, 1, sizeof(char)*(nx+ny+nz)); int *gx = gxyz; int *gy = gx + NGv; int *gz = gy + NGv; const double cutoff = EXPCUTOFF * aij * 4; int n, ix, iy, iz; double Gr, kk; for (n = 0; n < NGv; n++) { ix = gx[n]; iy = gy[n]; iz = gz[n]; kk = kx[n] * kx[n] + ky[n] * ky[n] + kz[n] * kz[n]; if (kk < cutoff) { ix = gx[n]; iy = gy[n]; iz = gz[n]; if (xempty[ix]) { Gr = Gxbase[ix] * br[0] + kr[0]; csx[ix] = cos(Gr)-sin(Gr)*_Complex_I; xempty[ix] = 0; } if (yempty[iy]) { Gr = Gybase[iy] * br[1] + kr[1]; csy[iy] = cos(Gr)-sin(Gr)*_Complex_I; yempty[iy] = 0; } if (zempty[iz]) { Gr = Gzbase[iz] * br[2] + kr[2]; csz[iz] = fac * (cos(Gr)-sin(Gr)*_Complex_I); zempty[iz] = 0; } out[n] = exp(-.25*kk/aij) * csx[ix]*csy[iy]*csz[iz]; } else { out[n] = 0; } } } static void zcopy_ij(double complex *out, const double complex *gctr, const int mi, const int mj, const int ni, const size_t NGv) { int i, j, k; for (j = 0; j < mj; j++) { for (i = 0; i < mi; i++) { for (k = 0; k < NGv; k++) { out[i*NGv+k] = gctr[i*NGv+k]; } } out += ni * NGv; gctr += mi * NGv; } } void GTO_ft_c2s_cart(double complex *out, double complex *gctr, int *dims, CINTEnvVars *envs, size_t NGv) { const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int nfi = envs->nfi; const int nfj = envs->nfj; const int ni = nfi*i_ctr; const int nj = nfj*j_ctr; const int nf = envs->nf; int ic, jc; double complex *pout; for (jc = 0; jc < nj; jc += nfj) { for (ic = 0; ic < ni; ic += nfi) { pout = out + (dims[0] * jc + ic) * NGv; zcopy_ij(pout, gctr, nfi, nfj, dims[0], NGv); gctr += nf * NGv; } } } #define C2S(sph, nket, cart, l) \ (double complex *)CINTc2s_ket_sph((double *)(sph), nket, (double 
*)(cart), l) #define OF_CMPLX 2 void GTO_ft_c2s_sph(double complex *out, double complex *gctr, int *dims, CINTEnvVars *envs, size_t NGv) { const int i_l = envs->i_l; const int j_l = envs->j_l; const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int di = i_l * 2 + 1; const int dj = j_l * 2 + 1; const int ni = di*i_ctr; const int nj = dj*j_ctr; const int nfi = envs->nfi; const int nf = envs->nf; int ic, jc, k; const int buflen = nfi*dj; double complex *buf1 = malloc(sizeof(double complex) * buflen*2 * NGv); double complex *buf2 = buf1 + buflen * NGv; double complex *pout, *pij, *buf; for (jc = 0; jc < nj; jc += dj) { for (ic = 0; ic < ni; ic += di) { buf = C2S(buf1, nfi*NGv*OF_CMPLX, gctr, j_l); pij = C2S(buf2, NGv*OF_CMPLX, buf, i_l); for (k = NGv; k < dj*NGv; k+=NGv) { pout = C2S(buf2+k*di, NGv*OF_CMPLX, buf+k*nfi, i_l); } pout = out + (dims[0] * jc + ic) * NGv; zcopy_ij(pout, pij, di, dj, dims[0], NGv); gctr += nf * NGv; } } free(buf1); } static void _ft_zset0(double complex *out, int *dims, int *counts, int comp, size_t NGv) { double complex *pout; int i, j, k, ic; for (ic = 0; ic < comp; ic++) { for (j = 0; j < counts[1]; j++) { pout = out + j * dims[0] * NGv; for (i = 0; i < counts[0]; i++) { for (k = 0; k < NGv; k++) { pout[i*NGv+k] = 0; } } } out += dims[0] * dims[1] * NGv; } } /************************************************* * * eval_aopair is one of GTO_aopair_early_contract, * GTO_aopair_lazy_contract * * eval_gz is one of GTO_Gv_general, GTO_Gv_uniform_orth, * GTO_Gv_uniform_nonorth, GTO_Gv_nonuniform_orth * *************************************************/ int GTO_ft_aopair_drv(double complex *out, int *dims, int (*eval_aopair)(), FPtr_eval_gz eval_gz, void (*f_c2s)(), double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv, CINTEnvVars *envs) { const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int n_comp = envs->ncomp_e1 * envs->ncomp_tensor; const size_t nc = envs->nf * i_ctr * j_ctr * NGv; double complex *gctr = malloc(sizeof(double complex) * nc * n_comp); if (eval_gz == NULL) { eval_gz = GTO_Gv_general; } if (eval_gz != GTO_Gv_general) { assert(gxyz != NULL); } if (eval_aopair == NULL) { const int *shls = envs->shls; const int *bas = envs->bas; const int i_sh = shls[0]; const int j_sh = shls[1]; const int i_prim = bas(NPRIM_OF, i_sh); const int j_prim = bas(NPRIM_OF, j_sh); if (i_prim*j_prim < i_ctr*j_ctr*3) { eval_aopair = GTO_aopair_lazy_contract; } else { eval_aopair = GTO_aopair_early_contract; } } int has_value = (*eval_aopair)(gctr, envs, eval_gz, fac, Gv, b, gxyz, gs, NGv); int counts[4]; if (f_c2s == &GTO_ft_c2s_sph) { counts[0] = (envs->i_l*2+1) * i_ctr; counts[1] = (envs->j_l*2+1) * j_ctr; } else { // f_c2s == &GTO_ft_c2s_cart counts[0] = envs->nfi * i_ctr; counts[1] = envs->nfj * j_ctr; } if (dims == NULL) { dims = counts; } size_t nout = dims[0] * dims[1] * NGv; int n; if (has_value) { for (n = 0; n < n_comp; n++) { (*f_c2s)(out+nout*n, gctr+nc*n, dims, envs, NGv); } } else { _ft_zset0(out, dims, counts, n_comp, NGv); } free(gctr); return has_value; } int GTO_ft_ovlp_cart(double complex *out, int *shls, int *dims, int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { CINTEnvVars envs; int ng[] = {0, 0, 0, 0, 0, 1, 0, 1}; GTO_ft_init1e_envs(&envs, ng, shls, atm, natm, bas, nbas, env); envs.f_gout = &inner_prod; return GTO_ft_aopair_drv(out, dims, eval_aopair, eval_gz, 
&GTO_ft_c2s_cart, fac, Gv, b, gxyz, gs, nGv, &envs); } int GTO_ft_ovlp_sph(double complex *out, int *shls, int *dims, int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { CINTEnvVars envs; int ng[] = {0, 0, 0, 0, 0, 1, 0, 1}; GTO_ft_init1e_envs(&envs, ng, shls, atm, natm, bas, nbas, env); envs.f_gout = &inner_prod; return GTO_ft_aopair_drv(out, dims, eval_aopair, eval_gz, &GTO_ft_c2s_sph, fac, Gv, b, gxyz, gs, nGv, &envs); } /************************************************* * *************************************************/ static void zcopy_s2_igtj(double complex *out, double complex *in, size_t NGv, int comp, int nij, int ip, int di, int dj) { const size_t ip1 = ip + 1; int i, j, n, ic; double complex *pin, *pout; for (ic = 0; ic < comp; ic++) { pout = out + ic * nij * NGv; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { pin = in + NGv * (j*di+i); for (n = 0; n < NGv; n++) { pout[j*NGv+n] = pin[n]; } } pout += (ip1 + i) * NGv; } } } static void zcopy_s2_ieqj(double complex *out, double complex *in, size_t NGv, int comp, int nij, int ip, int di, int dj) { const size_t ip1 = ip + 1; int i, j, n, ic; double complex *pin, *pout; for (ic = 0; ic < comp; ic++) { pout = out + ic * nij * NGv; for (i = 0; i < di; i++) { for (j = 0; j <= i; j++) { pin = in + NGv * (j*di+i); for (n = 0; n < NGv; n++) { pout[j*NGv+n] = pin[n]; } } pout += (ip1 + i) * NGv; } } } void GTO_ft_fill_s1(int (*intor)(), int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex *mat, int comp, int ish, int jsh, double complex *buf, int *shls_slice, int *ao_loc, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; ish += ish0; jsh += jsh0; const int nrow = ao_loc[ish1] - ao_loc[ish0]; const int ncol = ao_loc[jsh1] - ao_loc[jsh0]; const size_t off = ao_loc[ish] - ao_loc[ish0] + (ao_loc[jsh] - ao_loc[jsh0]) * nrow; int shls[2] = {ish, jsh}; int dims[2] = {nrow, ncol}; (*intor)(mat+off*nGv, shls, dims, eval_aopair, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); } void GTO_ft_fill_s1hermi(int (*intor)(), int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex *mat, int comp, int ish, int jsh, double complex *buf, int *shls_slice, int *ao_loc, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; ish += ish0; jsh += jsh0; const int ip = ao_loc[ish] - ao_loc[ish0]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; if (ip < jp) { return; } const int nrow = ao_loc[ish1] - ao_loc[ish0]; const int ncol = ao_loc[jsh1] - ao_loc[jsh0]; const size_t off = ao_loc[ish] - ao_loc[ish0] + (ao_loc[jsh] - ao_loc[jsh0]) * nrow; const size_t NGv = nGv; int shls[2] = {ish, jsh}; int dims[2] = {nrow, ncol}; (*intor)(mat+off*NGv, shls, dims, eval_aopair, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); if (ip != jp && ish0 == jsh0 && ish1 == jsh1) { const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; double complex *in = mat + off * NGv; double complex *out = mat + (ao_loc[jsh] - ao_loc[jsh0] + (ao_loc[ish] - ao_loc[ish0]) * nrow) * NGv; int i, j, n, ic; double 
complex *pout, *pin; for (ic = 0; ic < comp; ic++) { for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { pin = in + NGv * (j*nrow+i); pout = out + NGv * (i*nrow+j); for (n = 0; n < nGv; n++) { pout[n] = pin[n]; } } } out += nrow * ncol * NGv; } } } void GTO_ft_fill_s2(int (*intor)(), int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex *mat, int comp, int ish, int jsh, double complex *buf, int *shls_slice, int *ao_loc, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; ish += ish0; jsh += jsh0; const int ip = ao_loc[ish]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; if (ip < jp) { return; } const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int i0 = ao_loc[ish0]; const size_t off0 = i0 * (i0 + 1) / 2; const size_t off = ip * (ip + 1) / 2 - off0 + jp; const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0; const size_t NGv = nGv; int shls[2] = {ish, jsh}; int dims[2] = {di, dj}; (*intor)(buf, shls, dims, eval_aopair, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); if (ip != jp) { zcopy_s2_igtj(mat+off*NGv, buf, NGv, comp, nij, ip, di, dj); } else { zcopy_s2_ieqj(mat+off*NGv, buf, NGv, comp, nij, ip, di, dj); } } /* * Fourier transform AO pairs and add to mat (inplace) */ void GTO_ft_fill_drv(int (*intor)(), FPtr_eval_gz eval_gz, void (*fill)(), double complex *mat, int comp, int *shls_slice, int *ao_loc, double phase, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int nish = ish1 - ish0; const int njsh = jsh1 - jsh0; const double complex fac = cos(phase) + sin(phase)*_Complex_I; int (*eval_aopair)() = NULL; if (intor != &GTO_ft_ovlp_cart && intor != &GTO_ft_ovlp_sph) { eval_aopair = &GTO_aopair_lazy_contract; } #pragma omp parallel default(none) \ shared(intor, eval_gz, eval_aopair, fill, mat, comp, shls_slice, \ ao_loc, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env) { int i, j, ij; double complex *buf = malloc(sizeof(double complex) * NCTRMAX*NCTRMAX*comp*(size_t)nGv); #pragma omp for schedule(dynamic) for (ij = 0; ij < nish*njsh; ij++) { i = ij / njsh; j = ij % njsh; (*fill)(intor, eval_aopair, eval_gz, mat, comp, i, j, buf, shls_slice, ao_loc, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); } free(buf); } } /* * Given npair of shls in shls_lst, FT their AO pair value and add to * out (inplace) */ void GTO_ft_fill_shls_drv(int (*intor)(), FPtr_eval_gz eval_gz, double complex *out, int comp, int npair, int *shls_lst, int *ao_loc, double phase, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { int n, di, dj, ish, jsh; int *ijloc = malloc(sizeof(int) * npair); ijloc[0] = 0; for (n = 1; n < npair; n++) { ish = shls_lst[n*2-2]; jsh = shls_lst[n*2-1]; di = ao_loc[ish+1] - ao_loc[ish]; dj = ao_loc[jsh+1] - ao_loc[jsh]; ijloc[n] = ijloc[n-1] + di*dj; } const double complex fac = cos(phase) + sin(phase)*_Complex_I; const size_t NGv = nGv; int (*eval_aopair)() = NULL; if (intor != &GTO_ft_ovlp_cart && intor != &GTO_ft_ovlp_sph) { eval_aopair = &GTO_aopair_lazy_contract; } #pragma omp parallel default(none) \ shared(intor, eval_gz, eval_aopair, out, comp, Gv, b, gxyz, gs, \ nGv, npair, shls_lst, ao_loc, 
\ atm, natm, bas, nbas, env, ijloc) \ private(n) { int ish, jsh; int dims[2]; #pragma omp for schedule(dynamic) for (n = 0; n < npair; n++) { ish = shls_lst[n*2 ]; jsh = shls_lst[n*2+1]; dims[0] = ao_loc[ish+1] - ao_loc[ish]; dims[1] = ao_loc[jsh+1] - ao_loc[jsh]; (*intor)(out+ijloc[n]*comp*NGv, shls_lst+n*2, dims, eval_aopair, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); } } free(ijloc); } /* * Reversed vrr2d. They are used by numint_uniform_grid.c */ void GTOplain_vrr2d_ket_inc1(double *out, const double *g, double *rirj, int li, int lj) { if (lj == 0) { memcpy(out, g, sizeof(double)*_LEN_CART[li]); return; } const int row_10 = _LEN_CART[li+1]; const int row_00 = _LEN_CART[li ]; const int col_00 = _LEN_CART[lj-1]; const double *g00 = g; const double *g10 = g + row_00*col_00; int i, j; const double *p00, *p10; double *p01 = out; for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i); p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i)); p01[i] = p10[0] + rirj[0] * p00[0]; } p01 += row_00; } for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i); p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i)); p01[i] = p10[0] + rirj[1] * p00[0]; } p01 += row_00; } j = STARTZ_IF_L_DEC1(lj); if (j < _LEN_CART[lj-1]) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i); p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i)); p01[i] = p10[0] + rirj[2] * p00[0]; } } } void GTOreverse_vrr2d_ket_inc1(double *g01, double *g00, double *rirj, int li, int lj) { const int row_10 = _LEN_CART[li+1]; const int row_00 = _LEN_CART[li ]; const int col_00 = _LEN_CART[lj-1]; double *g10 = g00 + row_00*col_00; double *p00, *p10; int i, j; for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i); p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i)); p10[0] += g01[i]; p00[0] += g01[i] * rirj[0]; } g01 += row_00; } for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i); p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i)); p10[0] += g01[i]; p00[0] += g01[i] * rirj[1]; } g01 += row_00; } j = STARTZ_IF_L_DEC1(lj); if (j < _LEN_CART[lj-1]) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i); p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i)); p10[0] += g01[i]; p00[0] += g01[i] * rirj[2]; } } }
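/*
 * Illustrative sketch only (not part of this library): the two-term
 * horizontal recurrence that GTOplain_vrr2d_ket_inc1 and
 * GTOreverse_vrr2d_ket_inc1 above apply once per Cartesian direction,
 *
 *     (i | j + 1_x)  =  (i + 1_x | j)  +  rirj_x * (i | j)
 *
 * written out on plain double arrays.  The function name and the flat
 * array layout are hypothetical; the real routines walk the Cartesian
 * components through the _LEN_CART and WHERE{X,Y,Z}_IF_L_INC1 tables.
 */
static void hrr_ket_inc1_demo(double *out, const double *g_i_j,
                              const double *g_ip1_j, double rirj_x, int n)
{
        int k;
        for (k = 0; k < n; k++) {
                /* transfer one unit of angular momentum from bra to ket */
                out[k] = g_ip1_j[k] + rirj_x * g_i_j[k];
        }
}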
psd.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS DDDD % % P P SS D D % % PPPP SSS D D % % P SS D D % % P SSSSS DDDD % % % % % % Read/Write Adobe Photoshop Image Format % % % % Software Design % % Cristy % % Leonard Rosenthol % % July 1992 % % Dirk Lemstra % % December 2013 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/channel.h" #include "magick/colormap.h" #include "magick/colormap-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/constitute.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel.h" #include "magick/pixel-accessor.h" #include "magick/policy.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/registry.h" #include "magick/quantum-private.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #ifdef MAGICKCORE_ZLIB_DELEGATE #include <zlib.h> #endif #include "psd-private.h" /* Define declaractions. */ #define MaxPSDChannels 56 #define PSDQuantum(x) (((ssize_t) (x)+1) & -2) /* Enumerated declaractions. */ typedef enum { Raw = 0, RLE = 1, ZipWithoutPrediction = 2, ZipWithPrediction = 3 } PSDCompressionType; typedef enum { BitmapMode = 0, GrayscaleMode = 1, IndexedMode = 2, RGBMode = 3, CMYKMode = 4, MultichannelMode = 7, DuotoneMode = 8, LabMode = 9 } PSDImageType; /* Typedef declaractions. */ typedef struct _ChannelInfo { short type; size_t size; } ChannelInfo; typedef struct _MaskInfo { Image *image; RectangleInfo page; unsigned char background, flags; } MaskInfo; typedef struct _LayerInfo { ChannelInfo channel_info[MaxPSDChannels]; char blendkey[4]; Image *image; MaskInfo mask; Quantum opacity; RectangleInfo page; size_t offset_x, offset_y; unsigned char clipping, flags, name[257], visible; unsigned short channels; StringInfo *info; } LayerInfo; /* Forward declarations. 
*/ static MagickBooleanType WritePSDImage(const ImageInfo *,Image *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P S D % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPSD()() returns MagickTrue if the image format type, identified by the % magick string, is PSD. % % The format of the IsPSD method is: % % MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((const char *) magick,"8BPS",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPSDImage() reads an Adobe Photoshop image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadPSDImage method is: % % Image *ReadPSDImage(image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static const char *CompositeOperatorToPSDBlendMode(Image *image) { switch (image->compose) { case ColorBurnCompositeOp: return(image->endian == LSBEndian ? "vidi" : "idiv"); case ColorDodgeCompositeOp: return(image->endian == LSBEndian ? " vid" : "div "); case ColorizeCompositeOp: return(image->endian == LSBEndian ? "rloc" : "colr"); case DarkenCompositeOp: return(image->endian == LSBEndian ? "krad" : "dark"); case DifferenceCompositeOp: return(image->endian == LSBEndian ? "ffid" : "diff"); case DissolveCompositeOp: return(image->endian == LSBEndian ? "ssid" : "diss"); case ExclusionCompositeOp: return(image->endian == LSBEndian ? "dums" : "smud"); case HardLightCompositeOp: return(image->endian == LSBEndian ? "tiLh" : "hLit"); case HardMixCompositeOp: return(image->endian == LSBEndian ? "xiMh" : "hMix"); case HueCompositeOp: return(image->endian == LSBEndian ? " euh" : "hue "); case LightenCompositeOp: return(image->endian == LSBEndian ? "etil" : "lite"); case LinearBurnCompositeOp: return(image->endian == LSBEndian ? "nrbl" : "lbrn"); case LinearDodgeCompositeOp: return(image->endian == LSBEndian ? "gddl" : "lddg"); case LinearLightCompositeOp: return(image->endian == LSBEndian ? "tiLl" : "lLit"); case LuminizeCompositeOp: return(image->endian == LSBEndian ? " mul" : "lum "); case MultiplyCompositeOp: return(image->endian == LSBEndian ? " lum" : "mul "); case OverlayCompositeOp: return(image->endian == LSBEndian ? "revo" : "over"); case PinLightCompositeOp: return(image->endian == LSBEndian ? "tiLp" : "pLit"); case SaturateCompositeOp: return(image->endian == LSBEndian ? " tas" : "sat "); case ScreenCompositeOp: return(image->endian == LSBEndian ? "nrcs" : "scrn"); case SoftLightCompositeOp: return(image->endian == LSBEndian ? "tiLs" : "sLit"); case VividLightCompositeOp: return(image->endian == LSBEndian ? "tiLv" : "vLit"); case OverCompositeOp: default: return(image->endian == LSBEndian ? 
"mron" : "norm"); } } /* For some reason Photoshop seems to blend semi-transparent pixels with white. This method reverts the blending. This can be disabled by setting the option 'psd:alpha-unblend' to off. */ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info, Image *image,ExceptionInfo* exception) { const char *option; MagickBooleanType status; ssize_t y; if ((image->matte == MagickFalse) || (image->colorspace != sRGBColorspace)) return(MagickTrue); option=GetImageOption(image_info,"psd:alpha-unblend"); if (IsStringNotFalse(option) == MagickFalse) return(MagickTrue); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; gamma=QuantumScale*GetPixelAlpha(q); if (gamma != 0.0 && gamma != 1.0) { SetPixelRed(q,(GetPixelRed(q)-((1.0-gamma)*QuantumRange))/gamma); SetPixelGreen(q,(GetPixelGreen(q)-((1.0-gamma)*QuantumRange))/gamma); SetPixelBlue(q,(GetPixelBlue(q)-((1.0-gamma)*QuantumRange))/gamma); } q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity, MagickBooleanType revert,ExceptionInfo *exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying layer opacity %.20g", (double) opacity); if (opacity == QuantumRange) return(MagickTrue); if (image->matte != MagickTrue) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(q,(Quantum) (QuantumScale*(GetPixelAlpha(q)*opacity))); else if (opacity > 0) SetPixelAlpha(q,(Quantum) (QuantumRange*(GetPixelAlpha(q)/ (MagickRealType) opacity))); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask, Quantum background,MagickBooleanType revert,ExceptionInfo *exception) { Image *complete_mask; MagickBooleanType status; MagickPixelPacket color; ssize_t y; if (image->matte == MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying opacity mask"); complete_mask=CloneImage(image,0,0,MagickTrue,exception); if (complete_mask == (Image *) NULL) return(MagickFalse); complete_mask->matte=MagickTrue; 
GetMagickPixelPacket(complete_mask,&color); color.red=(MagickRealType) background; (void) SetImageColor(complete_mask,&color); status=CompositeImage(complete_mask,OverCompositeOp,mask, mask->page.x-image->page.x,mask->page.y-image->page.y); if (status == MagickFalse) { complete_mask=DestroyImage(complete_mask); return(status); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register PixelPacket *p; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception); if ((q == (PixelPacket *) NULL) || (p == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha=(MagickRealType) GetPixelAlpha(q); intensity=GetPixelIntensity(complete_mask,p); if (revert == MagickFalse) SetPixelAlpha(q,ClampToQuantum(intensity*(QuantumScale*alpha))); else if (intensity > 0) SetPixelAlpha(q,ClampToQuantum((alpha/intensity)*QuantumRange)); q++; p++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } complete_mask=DestroyImage(complete_mask); return(status); } static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info, ExceptionInfo *exception) { char *key; RandomInfo *random_info; StringInfo *key_info; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " preserving opacity mask"); random_info=AcquireRandomInfo(); key_info=GetRandomKey(random_info,2+1); key=(char *) GetStringInfoDatum(key_info); key[8]=(char) layer_info->mask.background; key[9]='\0'; layer_info->mask.image->page.x+=layer_info->page.x; layer_info->mask.image->page.y+=layer_info->page.y; (void) SetImageRegistry(ImageRegistryType,(const char *) key, layer_info->mask.image,exception); (void) SetImageArtifact(layer_info->image,"psd:opacity-mask", (const char *) key); key_info=DestroyStringInfo(key_info); random_info=DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; register ssize_t i, j; size_t length; ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { packets--; length=(size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length=256-length+1; CheckNumberCompactPixels; pixel=(*compact_pixels++); for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 
0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++=(unsigned char) pixel; break; } } } continue; } length++; for (j=0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(*compact_pixels >> 6) & 0x03; *pixels++=(*compact_pixels >> 4) & 0x03; *pixels++=(*compact_pixels >> 2) & 0x03; *pixels++=(*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++=(*compact_pixels >> 4) & 0xff; *pixels++=(*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++=(*compact_pixels); break; } } compact_pixels++; } } return(i); } static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info, const ssize_t number_layers) { ssize_t i; for (i=0; i<number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image=DestroyImage(layer_info[i].image); if (layer_info[i].mask.image != (Image *) NULL) layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); } return (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline size_t GetPSDPacketSize(const Image *image) { if (image->storage_class == PseudoClass) { if (image->colors > 256) return(2); } if (image->depth > 16) return(4); if (image->depth > 8) return(2); return(1); } static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image) { if (psd_info->version == 1) return((MagickSizeType) ReadBlobLong(image)); return((MagickSizeType) ReadBlobLongLong(image)); } static inline size_t GetPSDRowSize(Image *image) { if (image->depth == 1) return(((image->columns+7)/8)*GetPSDPacketSize(image)); else return(image->columns*GetPSDPacketSize(image)); } static const char *ModeToString(PSDImageType type) { switch (type) { case BitmapMode: return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; default: return "unknown"; } } static StringInfo *ParseImageResourceBlocks(Image *image, const unsigned char *blocks,size_t length, MagickBooleanType *has_merged_image) { const unsigned char *p; ssize_t offset; StringInfo *profile; unsigned char name_length; unsigned int count; unsigned short id, short_sans; if (length < 16) return((StringInfo *) NULL); profile=BlobToStringInfo((const void *) NULL,length); SetStringInfoDatum(profile,blocks); SetStringInfoName(profile,"8bim"); for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); ) { if (LocaleNCompare((const char *) p,"8BIM",4) 
!= 0) break; p+=4; p=PushShortPixel(MSBEndian,p,&id); p=PushCharPixel(p,&name_length); if ((name_length % 2) == 0) name_length++; p+=name_length; if (p > (blocks+length-4)) break; p=PushLongPixel(MSBEndian,p,&count); offset=(ssize_t) count; if (((p+offset) < blocks) || ((p+offset) > (blocks+length))) break; switch (id) { case 0x03ed: { char value[MaxTextExtent]; unsigned short resolution; /* Resolution info. */ if (offset < 16) break; p=PushShortPixel(MSBEndian,p,&resolution); image->x_resolution=(double) resolution; (void) FormatLocaleString(value,MaxTextExtent,"%g", image->x_resolution); (void) SetImageProperty(image,"tiff:XResolution",value); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&resolution); image->y_resolution=(double) resolution; (void) FormatLocaleString(value,MaxTextExtent,"%g", image->y_resolution); (void) SetImageProperty(image,"tiff:YResolution",value); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); image->units=PixelsPerInchResolution; break; } case 0x0421: { if ((offset > 4) && (*(p+4) == 0)) *has_merged_image=MagickFalse; p+=offset; break; } default: { p+=offset; break; } } if ((offset & 0x01) != 0) p++; } return(profile); } static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode) { if (mode == (const char *) NULL) return(OverCompositeOp); if (LocaleNCompare(mode,"norm",4) == 0) return(OverCompositeOp); if (LocaleNCompare(mode,"mul ",4) == 0) return(MultiplyCompositeOp); if (LocaleNCompare(mode,"diss",4) == 0) return(DissolveCompositeOp); if (LocaleNCompare(mode,"diff",4) == 0) return(DifferenceCompositeOp); if (LocaleNCompare(mode,"dark",4) == 0) return(DarkenCompositeOp); if (LocaleNCompare(mode,"lite",4) == 0) return(LightenCompositeOp); if (LocaleNCompare(mode,"hue ",4) == 0) return(HueCompositeOp); if (LocaleNCompare(mode,"sat ",4) == 0) return(SaturateCompositeOp); if (LocaleNCompare(mode,"colr",4) == 0) return(ColorizeCompositeOp); if (LocaleNCompare(mode,"lum ",4) == 0) return(LuminizeCompositeOp); if (LocaleNCompare(mode,"scrn",4) == 0) return(ScreenCompositeOp); if (LocaleNCompare(mode,"over",4) == 0) return(OverlayCompositeOp); if (LocaleNCompare(mode,"hLit",4) == 0) return(HardLightCompositeOp); if (LocaleNCompare(mode,"sLit",4) == 0) return(SoftLightCompositeOp); if (LocaleNCompare(mode,"smud",4) == 0) return(ExclusionCompositeOp); if (LocaleNCompare(mode,"div ",4) == 0) return(ColorDodgeCompositeOp); if (LocaleNCompare(mode,"idiv",4) == 0) return(ColorBurnCompositeOp); if (LocaleNCompare(mode,"lbrn",4) == 0) return(LinearBurnCompositeOp); if (LocaleNCompare(mode,"lddg",4) == 0) return(LinearDodgeCompositeOp); if (LocaleNCompare(mode,"lLit",4) == 0) return(LinearLightCompositeOp); if (LocaleNCompare(mode,"vLit",4) == 0) return(VividLightCompositeOp); if (LocaleNCompare(mode,"pLit",4) == 0) return(PinLightCompositeOp); if (LocaleNCompare(mode,"hMix",4) == 0) return(HardMixCompositeOp); return(OverCompositeOp); } static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length) { ssize_t count; count=ReadBlob(image,length,(unsigned char *) p); if ((count == (ssize_t) length) && (image->endian != MSBEndian)) { char *q; q=p+length; for(--q; p < q; ++p, --q) { *p = *p ^ *q, *q = *p ^ *q, *p = *p ^ *q; } } return(count); } static inline void SetPSDPixel(Image *image,const size_t channels, const ssize_t type,const size_t packet_size,const 
Quantum pixel,PixelPacket *q, IndexPacket *indexes,ssize_t x) { if (image->storage_class == PseudoClass) { PixelPacket *color; IndexPacket index; index=(IndexPacket) pixel; if (packet_size == 1) index=(IndexPacket) ScaleQuantumToChar(index); index=ConstrainColormapIndex(image,(ssize_t) index); if (type == 0) SetPixelIndex(indexes+x,index); if ((type == 0) && (channels > 1)) return; color=image->colormap+(ssize_t) GetPixelIndex(indexes+x); if (type != 0) SetPixelAlpha(color,pixel); SetPixelRGBO(q,color); return; } switch (type) { case -1: { SetPixelAlpha(q,pixel); break; } case -2: case 0: { SetPixelRed(q,pixel); if ((channels < 3) || (type == -2)) { SetPixelGreen(q,GetPixelRed(q)); SetPixelBlue(q,GetPixelRed(q)); } break; } case -3: case 1: { SetPixelGreen(q,pixel); break; } case -4: case 2: { SetPixelBlue(q,pixel); break; } case 3: { if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,pixel); else if (image->matte != MagickFalse) SetPixelAlpha(q,pixel); break; } case 4: { if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) && (channels > 3)) break; if (image->matte != MagickFalse) SetPixelAlpha(q,pixel); break; } } } static MagickBooleanType ReadPSDChannelPixels(Image *image, const size_t channels,const ssize_t row,const ssize_t type, const unsigned char *pixels,ExceptionInfo *exception) { Quantum pixel; register const unsigned char *p; register IndexPacket *indexes; register PixelPacket *q; register ssize_t x; size_t packet_size; unsigned short nibble; p=pixels; q=GetAuthenticPixels(image,0,row,image->columns,1,exception); if (q == (PixelPacket *) NULL) return MagickFalse; indexes=GetAuthenticIndexQueue(image); packet_size=GetPSDPacketSize(image); for (x=0; x < (ssize_t) image->columns; x++) { if (packet_size == 1) pixel=ScaleCharToQuantum(*p++); else if (packet_size == 2) { p=PushShortPixel(MSBEndian,p,&nibble); pixel=ScaleShortToQuantum(nibble); } else { MagickFloatType nibble; p=PushFloatPixel(MSBEndian,p,&nibble); pixel=ClampToQuantum((MagickRealType)QuantumRange*nibble); } if (image->depth > 1) { SetPSDPixel(image,channels,type,packet_size,pixel,q,indexes,x); q++; } else { ssize_t bit, number_bits; number_bits=(ssize_t) image->columns-x; if (number_bits > 8) number_bits=8; for (bit=0; bit < number_bits; bit++) { SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel) & (0x01 << (7-bit))) != 0 ? 
0 : QuantumRange,q++,indexes,x++); } if (x != (ssize_t) image->columns) x--; continue; } } return(SyncAuthenticPixels(image,exception)); } static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels, const ssize_t type,ExceptionInfo *exception) { MagickBooleanType status; size_t row_size; ssize_t count, y; unsigned char *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RAW"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) memset(pixels,0,row_size*sizeof(*pixels)); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,row_size,pixels); if (count != (ssize_t) row_size) { status=MagickFalse; break; } status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception); if (status == MagickFalse) break; } pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } static inline MagickOffsetType *ReadPSDRLESizes(Image *image, const PSDInfo *psd_info,const size_t size) { MagickOffsetType *sizes; ssize_t y; sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes)); if(sizes != (MagickOffsetType *) NULL) { for (y=0; y < (ssize_t) size; y++) { if (psd_info->version == 1) sizes[y]=(MagickOffsetType) ReadBlobShort(image); else sizes[y]=(MagickOffsetType) ReadBlobLong(image); } } return sizes; } static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info, const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception) { MagickBooleanType status; size_t length, row_size; ssize_t count, y; unsigned char *compact_pixels, *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RLE compressed"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); length=0; for (y=0; y < (ssize_t) image->rows; y++) if ((MagickOffsetType) length < sizes[y]) length=(size_t) sizes[y]; if (length > (row_size+2048)) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename); } compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels)); if (compact_pixels == (unsigned char *) NULL) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(compact_pixels,0,length*sizeof(*compact_pixels)); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,(size_t) sizes[y],compact_pixels); if (count != (ssize_t) sizes[y]) break; count=DecodePSDPixels((size_t) sizes[y],compact_pixels, (ssize_t) (image->depth == 1 ? 
123456 : image->depth),row_size,pixels); if (count != (ssize_t) row_size) break; status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels, exception); if (status == MagickFalse) break; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #ifdef MAGICKCORE_ZLIB_DELEGATE static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels, const ssize_t type,const PSDCompressionType compression, const size_t compact_size,ExceptionInfo *exception) { MagickBooleanType status; register unsigned char *p; size_t count, length, packet_size, row_size; ssize_t y; unsigned char *compact_pixels, *pixels; z_stream stream; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is ZIP compressed"); if ((MagickSizeType) compact_size > GetBlobSize(image)) ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size, sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); packet_size=GetPSDPacketSize(image); row_size=image->columns*packet_size; count=image->rows*row_size; pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) { compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; stream.next_in=(Bytef *)compact_pixels; stream.avail_in=(uInt) compact_size; stream.next_out=(Bytef *)pixels; stream.avail_out=(uInt) count; if (inflateInit(&stream) == Z_OK) { int ret; while (stream.avail_out > 0) { ret=inflate(&stream,Z_SYNC_FLUSH); if ((ret != Z_OK) && (ret != Z_STREAM_END)) { (void) inflateEnd(&stream); compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(MagickFalse); } if (ret == Z_STREAM_END) break; } (void) inflateEnd(&stream); } if (compression == ZipWithPrediction) { p=pixels; while (count > 0) { length=image->columns; while (--length) { if (packet_size == 2) { p[2]+=p[0]+((p[1]+p[3]) >> 8); p[3]+=p[1]; } else *(p+1)+=*p; p+=packet_size; } p+=packet_size; count-=row_size; } } status=MagickTrue; p=pixels; for (y=0; y < (ssize_t) image->rows; y++) { status=ReadPSDChannelPixels(image,channels,y,type,p,exception); if (status == MagickFalse) break; p+=row_size; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #endif static MagickBooleanType ReadPSDChannel(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info, const size_t channel,const PSDCompressionType compression, ExceptionInfo *exception) { Image *channel_image, *mask; MagickOffsetType offset; MagickBooleanType status; channel_image=image; mask=(Image *) NULL; if ((layer_info->channel_info[channel].type < -1) && (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0)) { const char 
*option; /* Ignore mask that is not a user supplied layer mask, if the mask is disabled or if the flags have unsupported values. */ option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if ((layer_info->channel_info[channel].type != -2) || (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) && (IsStringTrue(option) == MagickFalse))) { (void) SeekBlob(image,(MagickOffsetType) layer_info->channel_info[channel].size-2,SEEK_CUR); return(MagickTrue); } mask=CloneImage(image,layer_info->mask.page.width, layer_info->mask.page.height,MagickFalse,exception); if (mask != (Image *) NULL) { (void) ResetImagePixels(mask,exception); mask->matte=MagickFalse; channel_image=mask; } } offset=TellBlob(image); status=MagickFalse; switch(compression) { case Raw: status=ReadPSDChannelRaw(channel_image,psd_info->channels, (ssize_t) layer_info->channel_info[channel].type,exception); break; case RLE: { MagickOffsetType *sizes; sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ReadPSDChannelRLE(channel_image,psd_info, (ssize_t) layer_info->channel_info[channel].type,sizes,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); } break; case ZipWithPrediction: case ZipWithoutPrediction: #ifdef MAGICKCORE_ZLIB_DELEGATE status=ReadPSDChannelZip(channel_image,layer_info->channels, (ssize_t) layer_info->channel_info[channel].type,compression, layer_info->channel_info[channel].size-2,exception); #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (ZLIB)",image->filename); #endif break; default: (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning, "CompressionNotSupported","'%.20g'",(double) compression); break; } (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2, SEEK_SET); if (status == MagickFalse) { if (mask != (Image *) NULL) (void) DestroyImage(mask); ThrowBinaryException(CoderError,"UnableToDecompressImage", image->filename); } if (mask != (Image *) NULL) { if (layer_info->mask.image != (Image *) NULL) layer_info->mask.image=DestroyImage(layer_info->mask.image); layer_info->mask.image=mask; } return(status); } static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info, const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception) { char message[MaxTextExtent]; MagickBooleanType status; PSDCompressionType compression; ssize_t j; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " setting up new layer image"); if (psd_info->mode != IndexedMode) (void) SetImageBackgroundColor(layer_info->image); layer_info->image->compose=PSDBlendModeToCompositeOperator( layer_info->blendkey); if (layer_info->visible == MagickFalse) { layer_info->image->compose=NoCompositeOp; (void) SetImageArtifact(layer_info->image,"psd:layer.invisible","true"); } if (psd_info->mode == CMYKMode) (void) SetImageColorspace(layer_info->image,CMYKColorspace); else if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) || (psd_info->mode == GrayscaleMode)) (void) SetImageColorspace(layer_info->image,GRAYColorspace); /* Set up some hidden attributes for folks that need them. 
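The x/y offsets and the opacity are stored as the psd:layer.x, psd:layer.y and psd:layer.opacity artifacts, and the layer name is exposed as the "label" property.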
*/ (void) FormatLocaleString(message,MaxTextExtent,"%.20g", (double) layer_info->page.x); (void) SetImageArtifact(layer_info->image,"psd:layer.x",message); (void) FormatLocaleString(message,MaxTextExtent,"%.20g", (double) layer_info->page.y); (void) SetImageArtifact(layer_info->image,"psd:layer.y",message); (void) FormatLocaleString(message,MaxTextExtent,"%.20g",(double) layer_info->opacity); (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message); (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name); status=MagickTrue; for (j=0; j < (ssize_t) layer_info->channels; j++) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for channel %.20g",(double) j); compression=(PSDCompressionType) ReadBlobShort(layer_info->image); if ((compression == ZipWithPrediction) && (image->depth == 32)) { (void) ThrowMagickException(exception,GetMagickModule(), TypeError,"CompressionNotSupported","ZipWithPrediction(32 bit)"); return(MagickFalse); } layer_info->image->compression=ConvertPSDCompression(compression); if (layer_info->channel_info[j].type == -1) layer_info->image->matte=MagickTrue; status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info, (size_t) j,compression,exception); InheritException(exception,&layer_info->image->exception); if (status == MagickFalse) break; } if (status != MagickFalse) status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity, MagickFalse,exception); if ((status != MagickFalse) && (layer_info->image->colorspace == CMYKColorspace)) status=NegateImage(layer_info->image,MagickFalse); if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL)) { const char *option; layer_info->mask.image->page.x=layer_info->mask.page.x; layer_info->mask.image->page.y=layer_info->mask.page.y; /* Do not composite the mask when it is disabled */ if ((layer_info->mask.flags & 0x02) == 0x02) layer_info->mask.image->compose=NoCompositeOp; else status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image, layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse, exception); option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if (IsStringTrue(option) != MagickFalse) PreservePSDOpacityMask(image,layer_info,exception); layer_info->mask.image=DestroyImage(layer_info->mask.image); } return(status); } static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info, LayerInfo *layer_info) { int channel_type; register ssize_t i; if (layer_info->channels < psd_info->min_channels) return(MagickFalse); channel_type=RedChannel; if (psd_info->min_channels >= 3) channel_type|=(GreenChannel | BlueChannel); if (psd_info->min_channels >= 4) channel_type|=BlackChannel; for (i=0; i < (ssize_t) layer_info->channels; i++) { short type; type=layer_info->channel_info[i].type; if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0)) return(MagickFalse); if (type == -1) { channel_type|=AlphaChannel; continue; } if (type < -1) continue; if (type == 0) channel_type&=~RedChannel; else if (type == 1) channel_type&=~GreenChannel; else if (type == 2) channel_type&=~BlueChannel; else if (type == 3) channel_type&=~BlackChannel; } if (channel_type == 0) return(MagickTrue); if ((channel_type == AlphaChannel) && (layer_info->channels >= psd_info->min_channels + 1)) return(MagickTrue); return(MagickFalse); } static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image) { /* The number of layers cannot be used to determine if the merged image contains an alpha channel. 
So we enable it when we think we should. */ if (((psd_info->mode == GrayscaleMode) && (psd_info->channels > 1)) || ((psd_info->mode == RGBMode) && (psd_info->channels > 3)) || ((psd_info->mode == CMYKMode) && (psd_info->channels > 4))) image->matte=MagickTrue; } static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image) { char type[4]; MagickSizeType size; ssize_t count; size=GetPSDSize(psd_info,image); if (size != 0) return(size); (void) ReadBlobLong(image); count=ReadPSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) return(0); count=ReadPSDString(image,type,4); if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) || (LocaleNCompare(type,"Mt32",4) == 0) || (LocaleNCompare(type,"Mtrn",4) == 0))) { size=GetPSDSize(psd_info,image); if (size != 0) return(0); image->matte=MagickTrue; count=ReadPSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) return(0); count=ReadPSDString(image,type,4); } if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) || (LocaleNCompare(type,"Lr32",4) == 0))) size=GetPSDSize(psd_info,image); return(size); } static MagickBooleanType ReadPSDLayersInternal(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info, const MagickBooleanType skip_layers,ExceptionInfo *exception) { char type[4]; LayerInfo *layer_info; MagickSizeType size; MagickBooleanType status; register ssize_t i; ssize_t count, j, number_layers; size=GetLayerInfoSize(psd_info,image); status=MagickTrue; if (size != 0) { layer_info=(LayerInfo *) NULL; number_layers=(ssize_t) ReadBlobSignedShort(image); if (number_layers < 0) { /* The first alpha channel in the merged result contains the transparency data for the merged result. */ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->matte=MagickTrue; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t top, left, bottom, right; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1); top=(ssize_t) ReadBlobSignedLong(image); left=(ssize_t) ReadBlobSignedLong(image); bottom=(ssize_t) ReadBlobSignedLong(image); right=(ssize_t) ReadBlobSignedLong(image); if ((right < left) || (bottom < top)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } layer_info[i].page.y=top; layer_info[i].page.x=left; layer_info[i].page.width=(size_t) (right-left); layer_info[i].page.height=(size_t) (bottom-top); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); 
ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double) layer_info[i].page.x,(double) layer_info[i].page.y, (double) layer_info[i].page.height,(double) layer_info[i].page.width,(double) layer_info[i].channels); for (j=0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type=(short) ReadBlobShort(image); if ((layer_info[i].channel_info[j].type < -4) || (layer_info[i].channel_info[j].type > 4)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"NoSuchImageChannel", image->filename); } layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g",(double) j, (double) layer_info[i].channel_info[j].type, (double) layer_info[i].channel_info[j].size); } if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadPSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadPSDString(image,layer_info[i].blendkey,4); if (count != 4) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping=(unsigned char) ReadBlobByte(image); layer_info[i].flags=(unsigned char) ReadBlobByte(image); layer_info[i].visible=!(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey,(double) layer_info[i].opacity, layer_info[i].clipping ? "true" : "false",layer_info[i].flags, layer_info[i].visible ? "true" : "false"); (void) ReadBlobByte(image); /* filler */ size=ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer contains additional info"); length=ReadBlobLong(image); combined_length=length+4; if (length != 0) { /* Layer mask info. 
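The record is read as four signed longs (top, left, bottom, right) followed by the background (default colour) byte and a flags byte; this reader treats flag 0x01 as "mask offsets already relative to the layer" and flag 0x02 as "mask disabled".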
*/ layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.height=(size_t) ( ReadBlobSignedLong(image)-layer_info[i].mask.page.y); layer_info[i].mask.page.width=(size_t) ( ReadBlobSignedLong(image)-layer_info[i].mask.page.x); layer_info[i].mask.background=(unsigned char) ReadBlobByte( image); layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y=layer_info[i].mask.page.y- layer_info[i].page.y; layer_info[i].mask.page.x=layer_info[i].mask.page.x- layer_info[i].page.x; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double) layer_info[i].mask.page.x,(double) layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width, (double) layer_info[i].mask.page.height,(double) ((MagickOffsetType) length)-18); /* Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); } } length=ReadBlobLong(image); combined_length+=length+4; if (length != 0) { /* Layer blending ranges info. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer blending ranges: length=%.20g",(double) ((MagickOffsetType) length)); if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } /* Layer name. */ length=(MagickSizeType) (unsigned char) ReadBlobByte(image); combined_length+=length+1; if (length > 0) (void) ReadBlob(image,(size_t) length++,layer_info[i].name); layer_info[i].name[length]='\0'; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer name: %s",layer_info[i].name); if ((length % 4) != 0) { length=4-(length % 4); combined_length+=length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=(MagickSizeType) size-combined_length; if (length > 0) { unsigned char *info; if (length > GetBlobSize(image)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "InsufficientImageDataInFile",image->filename); } layer_info[i].info=AcquireStringInfo((const size_t) length); info=GetStringInfoDatum(layer_info[i].info); (void) ReadBlob(image,(const size_t) length,info); } } } for (i=0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); continue; } /* Allocate layered image. 
*/ layer_info[i].image=CloneImage(image,layer_info[i].page.width, layer_info[i].page.height,MagickFalse,exception); if (layer_info[i].image == (Image *) NULL) { layer_info=DestroyLayerInfo(layer_info,number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of image for layer %.20g failed",(double) i); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void) SetImageProfile(layer_info[i].image,"psd:additional-info", layer_info[i].info); layer_info[i].info=DestroyStringInfo(layer_info[i].info); } } if (image_info->ping == MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=0; j < (ssize_t) layer_info[i].channels; j++) { if (DiscardBlobBytes(image,(MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } continue; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for layer %.20g",(double) i); status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i], exception); if (status == MagickFalse) break; status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i, (MagickSizeType) number_layers); if (status == MagickFalse) break; } } if (status != MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=i; j < number_layers - 1; j++) layer_info[j] = layer_info[j+1]; number_layers--; i--; } } if (number_layers > 0) { for (i=0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous=layer_info[i-1].image; if (i < (number_layers-1)) layer_info[i].image->next=layer_info[i+1].image; layer_info[i].image->page=layer_info[i].page; } image->next=layer_info[0].image; layer_info[0].image->previous=image; } layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); } else layer_info=DestroyLayerInfo(layer_info,number_layers); } return(status); } ModuleExport MagickBooleanType ReadPSDLayers(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info, const MagickBooleanType skip_layers,ExceptionInfo *exception) { PolicyDomain domain; PolicyRights rights; domain=CoderPolicyDomain; rights=ReadPolicyRights; if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse) return(MagickFalse); return(ReadPSDLayersInternal(image,image_info,psd_info,skip_layers, exception)); } static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info, Image* image,const PSDInfo* psd_info,ExceptionInfo *exception) { MagickOffsetType *sizes; MagickBooleanType status; PSDCompressionType compression; register ssize_t i; compression=(PSDCompressionType) ReadBlobMSBShort(image); image->compression=ConvertPSDCompression(compression); if (compression != Raw && compression != RLE) { (void) ThrowMagickException(exception,GetMagickModule(), TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression); return(MagickFalse); } sizes=(MagickOffsetType *) NULL; if (compression == RLE) { sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } status=MagickTrue; for (i=0; i < (ssize_t) psd_info->channels; i++) { ssize_t type; type=i; if ((type == 1) && (psd_info->channels == 2)) type=-1; if (compression == RLE) 
status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows), exception); else status=ReadPSDChannelRaw(image,psd_info->channels,type,exception); if (status != MagickFalse) status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i, psd_info->channels); if (status == MagickFalse) break; } if ((status != MagickFalse) && (image->colorspace == CMYKColorspace)) status=NegateImage(image,MagickFalse); if (status != MagickFalse) status=CorrectPSDAlphaBlend(image_info,image,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); return(status); } static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType has_merged_image, skip_layers; MagickOffsetType offset; MagickSizeType length; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t imageListLength; ssize_t count; StringInfo *profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read image header. */ image->endian=MSBEndian; count=ReadBlob(image,4,(unsigned char *) psd_info.signature); psd_info.version=ReadBlobMSBShort(image); if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) || ((psd_info.version != 1) && (psd_info.version != 2))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); (void) ReadBlob(image,6,psd_info.reserved); psd_info.channels=ReadBlobMSBShort(image); if (psd_info.channels < 1) ThrowReaderException(CorruptImageError,"MissingImageChannel"); if (psd_info.channels > MaxPSDChannels) ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded"); psd_info.rows=ReadBlobMSBLong(image); psd_info.columns=ReadBlobMSBLong(image); if ((psd_info.version == 1) && ((psd_info.rows > 30000) || (psd_info.columns > 30000))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.depth=ReadBlobMSBShort(image); if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16) && (psd_info.depth != 32)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.mode=ReadBlobMSBShort(image); if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s", (double) psd_info.columns,(double) psd_info.rows,(double) psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType) psd_info.mode)); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize image. 
*/ image->depth=psd_info.depth; image->columns=psd_info.columns; image->rows=psd_info.rows; status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } status=ResetImagePixels(image,exception); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } psd_info.min_channels=3; if (psd_info.mode == LabMode) (void) SetImageColorspace(image,LabColorspace); if (psd_info.mode == CMYKMode) { psd_info.min_channels=4; (void) SetImageColorspace(image,CMYKColorspace); } else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) || (psd_info.mode == DuotoneMode)) { if (psd_info.depth != 32) { status=AcquireImageColormap(image,MagickMin((size_t) (psd_info.depth < 16 ? 256 : 65536), MaxColormapSize)); if (status == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image colormap allocated"); } psd_info.min_channels=1; (void) SetImageColorspace(image,GRAYColorspace); } if (psd_info.channels < psd_info.min_channels) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Read PSD raster colormap only present for indexed and duotone images. */ length=ReadBlobMSBLong(image); if ((psd_info.mode == IndexedMode) && (length < 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (length != 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading colormap"); if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32)) { /* Duotone image data; the format of this data is undocumented. */ (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR); } else { size_t number_colors; /* Read PSD raster colormap. */ number_colors=(size_t) length/3; if (number_colors > 65536) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireImageColormap(image,number_colors) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].red=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].green=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].blue=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->matte=MagickFalse; } } if ((image->depth == 1) && (image->storage_class != PseudoClass)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); has_merged_image=MagickTrue; profile=(StringInfo *) NULL; length=ReadBlobMSBLong(image); if (length != 0) { unsigned char *blocks; /* Image resources block. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading image resource blocks - %.20g bytes",(double) ((MagickOffsetType) length)); if (length > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); blocks=(unsigned char *) AcquireQuantumMemory((size_t) length, sizeof(*blocks)); if (blocks == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,(size_t) length,blocks); if ((count != (ssize_t) length) || (length < 4) || (LocaleNCompare((char *) blocks,"8BIM",4) != 0)) { blocks=(unsigned char *) RelinquishMagickMemory(blocks); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } profile=ParseImageResourceBlocks(image,blocks,(size_t) length, &has_merged_image); blocks=(unsigned char *) RelinquishMagickMemory(blocks); } /* Layer and mask block. */ length=GetPSDSize(&psd_info,image); if (length == 8) { length=ReadBlobMSBLong(image); length=ReadBlobMSBLong(image); } offset=TellBlob(image); skip_layers=MagickFalse; if ((image_info->number_scenes == 1) && (image_info->scene == 0) && (has_merged_image != MagickFalse)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " read composite only"); skip_layers=MagickTrue; } if (length == 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has no layers"); } else { if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers, exception) != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } /* Skip the rest of the layer and mask information. */ (void) SeekBlob(image,offset+length,SEEK_SET); } /* If we are only "pinging" the image, then we're done - so return. */ if (EOFBlob(image) != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); } if (image_info->ping != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* Read the precombined layer, present for PSD < 4 compatibility. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading the precombined layer"); imageListLength=GetImageListLength(image); if (has_merged_image != MagickFalse || imageListLength == 1) has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image, &psd_info,exception); if ((has_merged_image == MagickFalse) && (imageListLength == 1) && (length != 0)) { (void) SeekBlob(image,offset,SEEK_SET); status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse, exception); if (status != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } } if (has_merged_image == MagickFalse) { Image *merged; if (imageListLength == 1) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); } image->background_color.opacity=TransparentOpacity; (void) SetImageBackgroundColor(image); merged=MergeImageLayers(image,FlattenLayer,exception); ReplaceImageInList(&image,merged); } if (profile != (StringInfo *) NULL) { Image *next; next=image; while (next != (Image *) NULL) { (void) SetImageProfile(next,GetStringInfoName(profile),profile); next=next->next; } profile=DestroyStringInfo(profile); } (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPSDImage() adds properties for the PSD image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterPSDImage method is: % % size_t RegisterPSDImage(void) % */ ModuleExport size_t RegisterPSDImage(void) { MagickInfo *entry; entry=SetMagickInfo("PSB"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->seekable_stream=MagickTrue; entry->description=ConstantString("Adobe Large Document Format"); entry->magick_module=ConstantString("PSD"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PSD"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->seekable_stream=MagickTrue; entry->description=ConstantString("Adobe Photoshop bitmap"); entry->magick_module=ConstantString("PSD"); (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterPSDImage() removes format registrations made by the % PSD module from the list of supported formats. 
% % The format of the UnregisterPSDImage method is: % % UnregisterPSDImage(void) % */ ModuleExport void UnregisterPSDImage(void) { (void) UnregisterMagickInfo("PSB"); (void) UnregisterMagickInfo("PSD"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePSDImage() writes an image in the Adobe Photoshop encoded image format. % % The format of the WritePSDImage method is: % % MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % */ static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image, const size_t offset) { if (psd_info->version == 1) return(WriteBlobMSBShort(image,(unsigned short) offset)); return(WriteBlobMSBLong(image,(unsigned int) offset)); } static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickOffsetType offset) { MagickOffsetType current_offset; ssize_t result; current_offset=TellBlob(image); (void) SeekBlob(image,offset,SEEK_SET); if (psd_info->version == 1) result=WriteBlobMSBShort(image,(unsigned short) size); else result=WriteBlobMSBLong(image,(unsigned int) size); (void) SeekBlob(image,current_offset,SEEK_SET); return(result); } static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size) { if (psd_info->version == 1) return(WriteBlobMSBLong(image,(unsigned int) size)); return(WriteBlobMSBLongLong(image,size)); } static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickOffsetType offset) { MagickOffsetType current_offset; ssize_t result; current_offset=TellBlob(image); (void) SeekBlob(image,offset,SEEK_SET); if (psd_info->version == 1) result=WriteBlobMSBLong(image,(unsigned int) size); else result=WriteBlobMSBLongLong(image,size); (void) SeekBlob(image,current_offset,SEEK_SET); return(result); } static size_t PSDPackbitsEncodeImage(Image *image,const size_t length, const unsigned char *pixels,unsigned char *compact_pixels) { int count; register ssize_t i, j; register unsigned char *q; unsigned char *packbits; /* Compress pixels with Packbits encoding. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pixels != (unsigned char *) NULL); assert(compact_pixels != (unsigned char *) NULL); packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits)); if (packbits == (unsigned char *) NULL) ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed", image->filename); q=compact_pixels; for (i=(ssize_t) length; i != 0; ) { switch (i) { case 1: { i--; *q++=(unsigned char) 0; *q++=(*pixels); break; } case 2: { i-=2; *q++=(unsigned char) 1; *q++=(*pixels); *q++=pixels[1]; break; } case 3: { i-=3; if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { *q++=(unsigned char) ((256-3)+1); *q++=(*pixels); break; } *q++=(unsigned char) 2; *q++=(*pixels); *q++=pixels[1]; *q++=pixels[2]; break; } default: { if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { /* Packed run. 
*/ count=3; while (((ssize_t) count < i) && (*pixels == *(pixels+count))) { count++; if (count >= 127) break; } i-=count; *q++=(unsigned char) ((256-count)+1); *q++=(*pixels); pixels+=count; break; } /* Literal run. */ count=0; while ((*(pixels+count) != *(pixels+count+1)) || (*(pixels+count+1) != *(pixels+count+2))) { packbits[count+1]=pixels[count]; count++; if (((ssize_t) count >= (i-3)) || (count >= 127)) break; } i-=count; *packbits=(unsigned char) (count-1); for (j=0; j <= (ssize_t) count; j++) *q++=packbits[j]; pixels+=count; break; } } } *q++=(unsigned char) 128; /* EOD marker */ packbits=(unsigned char *) RelinquishMagickMemory(packbits); return((size_t) (q-compact_pixels)); } static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image, const Image *next_image,const ssize_t channels) { ssize_t i, offset, y; if (next_image->compression == RLECompression) { offset=WriteBlobMSBShort(image,RLE); for (i=0; i < channels; i++) for (y=0; y < (ssize_t) next_image->rows; y++) offset+=SetPSDOffset(psd_info,image,0); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (next_image->compression == ZipCompression) offset=WriteBlobMSBShort(image,ZipWithoutPrediction); #endif else offset=WriteBlobMSBShort(image,Raw); return((size_t) offset); } static size_t WritePSDChannel(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, const QuantumType quantum_type, unsigned char *compact_pixels, MagickOffsetType size_offset,const MagickBooleanType separate) { MagickBooleanType monochrome; QuantumInfo *quantum_info; register const PixelPacket *p; register ssize_t i; size_t count, length; ssize_t y; unsigned char *pixels; #ifdef MAGICKCORE_ZLIB_DELEGATE int flush, level; unsigned char *compressed_pixels; z_stream stream; compressed_pixels=(unsigned char *) NULL; flush=Z_NO_FLUSH; #endif count=0; if (separate != MagickFalse) { size_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,1); } if (next_image->depth > 8) next_image->depth=16; monochrome=IsMonochromeImage(image,&image->exception) && (image->depth == 1) ? 
MagickTrue : MagickFalse; quantum_info=AcquireQuantumInfo(image_info,next_image); if (quantum_info == (QuantumInfo *) NULL) return(0); pixels=GetQuantumPixels(quantum_info); #ifdef MAGICKCORE_ZLIB_DELEGATE if (next_image->compression == ZipCompression) { compressed_pixels=(unsigned char *) AcquireQuantumMemory( MagickMinBufferExtent,sizeof(*compressed_pixels)); if (compressed_pixels == (unsigned char *) NULL) { quantum_info=DestroyQuantumInfo(quantum_info); return(0); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; level=Z_DEFAULT_COMPRESSION; if ((image_info->quality > 0 && image_info->quality < 10)) level=(int) image_info->quality; if (deflateInit(&stream,level) != Z_OK) { quantum_info=DestroyQuantumInfo(quantum_info); compressed_pixels=(unsigned char *) RelinquishMagickMemory( compressed_pixels); return(0); } } #endif for (y=0; y < (ssize_t) next_image->rows; y++) { p=GetVirtualPixels(next_image,0,y,next_image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info, quantum_type,pixels,&image->exception); if (monochrome != MagickFalse) for (i=0; i < (ssize_t) length; i++) pixels[i]=(~pixels[i]); if (next_image->compression == RLECompression) { length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels); count+=WriteBlob(image,length,compact_pixels); size_offset+=WritePSDOffset(psd_info,image,length,size_offset); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (next_image->compression == ZipCompression) { stream.avail_in=(uInt) length; stream.next_in=(Bytef *) pixels; if (y == (ssize_t) next_image->rows-1) flush=Z_FINISH; do { stream.avail_out=(uInt) MagickMinBufferExtent; stream.next_out=(Bytef *) compressed_pixels; if (deflate(&stream,flush) == Z_STREAM_ERROR) break; length=(size_t) MagickMinBufferExtent-stream.avail_out; if (length > 0) count+=WriteBlob(image,length,compressed_pixels); } while (stream.avail_out == 0); } #endif else count+=WriteBlob(image,length,pixels); } #ifdef MAGICKCORE_ZLIB_DELEGATE if (next_image->compression == ZipCompression) { (void) deflateEnd(&stream); compressed_pixels=(unsigned char *) RelinquishMagickMemory( compressed_pixels); } #endif quantum_info=DestroyQuantumInfo(quantum_info); return(count); } static unsigned char *AcquireCompactPixels(Image *image) { size_t packet_size; unsigned char *compact_pixels; packet_size=image->depth > 8UL ? 
2UL : 1UL; compact_pixels=(unsigned char *) AcquireQuantumMemory((9* image->columns)+1,packet_size*sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); } return(compact_pixels); } static ssize_t WritePSDChannels(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, MagickOffsetType size_offset,const MagickBooleanType separate) { Image *mask; MagickOffsetType rows_offset; size_t channels, length, offset_length; ssize_t count; unsigned char *compact_pixels; count=0; offset_length=0; rows_offset=0; compact_pixels=(unsigned char *) NULL; if (next_image->compression == RLECompression) { compact_pixels=AcquireCompactPixels(next_image); if (compact_pixels == (unsigned char *) NULL) return(0); } channels=1; if (separate == MagickFalse) { if ((next_image->storage_class != PseudoClass) || (IsGrayImage(next_image,&next_image->exception) != MagickFalse)) { if (IsGrayImage(next_image,&next_image->exception) == MagickFalse) channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 : 3); if (next_image->matte != MagickFalse) channels++; } rows_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,(ssize_t) channels); offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4)); } size_offset+=2; if ((next_image->storage_class == PseudoClass) && (IsGrayImage(next_image,&next_image->exception) == MagickFalse)) { length=WritePSDChannel(psd_info,image_info,image,next_image, IndexQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (IsGrayImage(next_image,&next_image->exception) != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, GrayQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (next_image->colorspace == CMYKColorspace) (void) NegateImage(next_image,MagickFalse); length=WritePSDChannel(psd_info,image_info,image,next_image, RedQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, GreenQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, BlueQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; if (next_image->colorspace == CMYKColorspace) { length=WritePSDChannel(psd_info,image_info,image,next_image, BlackQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } if (next_image->matte != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, AlphaQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else 
rows_offset+=offset_length; count+=length; } } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); if (next_image->colorspace == CMYKColorspace) (void) NegateImage(next_image,MagickFalse); if (separate != MagickFalse) { const char *property; property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, &image->exception); if (mask != (Image *) NULL) { if (mask->compression == RLECompression) { compact_pixels=AcquireCompactPixels(mask); if (compact_pixels == (unsigned char *) NULL) return(0); } length=WritePSDChannel(psd_info,image_info,image,mask, RedQuantum,compact_pixels,rows_offset,MagickTrue); (void) WritePSDSize(psd_info,image,length,size_offset); count+=length; compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); } } } return(count); } static size_t WritePascalString(Image *image,const char *value,size_t padding) { size_t count, length; register ssize_t i; /* Max length is 255. */ count=0; length=(strlen(value) > 255UL ) ? 255UL : strlen(value); if (length == 0) count+=WriteBlobByte(image,0); else { count+=WriteBlobByte(image,(unsigned char) length); count+=WriteBlob(image,length,(const unsigned char *) value); } length++; if ((length % padding) == 0) return(count); for (i=0; i < (ssize_t) (padding-(length % padding)); i++) count+=WriteBlobByte(image,0); return(count); } static void WriteResolutionResourceBlock(Image *image) { double x_resolution, y_resolution; unsigned short units; if (image->units == PixelsPerCentimeterResolution) { x_resolution=2.54*65536.0*image->x_resolution+0.5; y_resolution=2.54*65536.0*image->y_resolution+0.5; units=2; } else { x_resolution=65536.0*image->x_resolution+0.5; y_resolution=65536.0*image->y_resolution+0.5; units=1; } (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x03ED); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,16); /* resource size */ (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */ (void) WriteBlobMSBShort(image,units); /* width unit */ (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */ (void) WriteBlobMSBShort(image,units); /* height unit */ } static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image, const signed short channel) { ssize_t count; count=WriteBlobMSBSignedShort(image,channel); count+=SetPSDSize(psd_info,image,0); return((size_t) count); } static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { register unsigned char *q; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); if (id == 0x0000040f) { ssize_t quantum; quantum=PSDQuantum(count)+12; if ((quantum >= 12) && (quantum < (ssize_t) length)) { if ((q+quantum < (datum+length-16))) (void) memmove(q,q+quantum,length-quantum-(q-datum)); SetStringInfoLength(bim_profile,length-quantum); } break; } p+=count; 
if ((count & 0x01) != 0) p++; } } static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { register unsigned char *q; ssize_t cnt; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) return; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); cnt=PSDQuantum(count); if (cnt < 0) return; if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) && ((ssize_t) length-(cnt+12)-(q-datum)) > 0) { (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum)); SetStringInfoLength(bim_profile,length-(cnt+12)); break; } p+=count; if ((count & 0x01) != 0) p++; } } static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info, Image *image) { #define PSDKeySize 5 #define PSDAllowedLength 36 char key[PSDKeySize]; /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */ const char allowed[PSDAllowedLength][PSDKeySize] = { "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk", "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr", "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl", "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA" }, *option; const StringInfo *info; MagickBooleanType found; register size_t i; size_t remaining_length, length; StringInfo *profile; unsigned char *p; unsigned int size; info=GetImageProfile(image,"psd:additional-info"); if (info == (const StringInfo *) NULL) return((const StringInfo *) NULL); option=GetImageOption(image_info,"psd:additional-info"); if (LocaleCompare(option,"all") == 0) return(info); if (LocaleCompare(option,"selective") != 0) { profile=RemoveImageProfile(image,"psd:additional-info"); return(DestroyStringInfo(profile)); } length=GetStringInfoLength(info); p=GetStringInfoDatum(info); remaining_length=length; length=0; while (remaining_length >= 12) { /* skip over signature */ p+=4; key[0]=(char) (*p++); key[1]=(char) (*p++); key[2]=(char) (*p++); key[3]=(char) (*p++); key[4]='\0'; size=(unsigned int) (*p++) << 24; size|=(unsigned int) (*p++) << 16; size|=(unsigned int) (*p++) << 8; size|=(unsigned int) (*p++); size=size & 0xffffffff; remaining_length-=12; if ((size_t) size > remaining_length) return((const StringInfo *) NULL); found=MagickFalse; for (i=0; i < PSDAllowedLength; i++) { if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0) continue; found=MagickTrue; break; } remaining_length-=(size_t) size; if (found == MagickFalse) { if (remaining_length > 0) p=(unsigned char *) memmove(p-12,p+size,remaining_length); continue; } length+=(size_t) size+12; p+=size; } profile=RemoveImageProfile(image,"psd:additional-info"); if (length == 0) return(DestroyStringInfo(profile)); SetStringInfoLength(profile,(const size_t) length); (void) SetImageProfile(image,"psd:additional-info",info); return(profile); } static MagickBooleanType WritePSDImage(const ImageInfo *image_info, Image *image) { char layer_name[MaxTextExtent]; const char *property; const StringInfo *icc_profile, *info; Image *base_image, *next_image; MagickBooleanType status; MagickOffsetType *layer_size_offsets, size_offset; PSDInfo psd_info; register ssize_t i; 
size_t layer_count, layer_index, length, name_length, num_channels, packet_size, rounded_size, size; StringInfo *bim_profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); packet_size=(size_t) (image->depth > 8 ? 6 : 3); if (image->matte != MagickFalse) packet_size+=image->depth > 8 ? 2 : 1; psd_info.version=1; if ((LocaleCompare(image_info->magick,"PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version=2; (void) WriteBlob(image,4,(const unsigned char *) "8BPS"); (void) WriteBlobMSBShort(image,psd_info.version); /* version */ for (i=1; i <= 6; i++) (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */ /* When the image has a color profile it won't be converted to gray scale */ if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) && (SetImageGray(image,&image->exception) != MagickFalse)) num_channels=(image->matte != MagickFalse ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorMatteType) && (image->storage_class == PseudoClass)) num_channels=(image->matte != MagickFalse ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void) SetImageStorageClass(image,DirectClass); if (image->colorspace != CMYKColorspace) num_channels=(image->matte != MagickFalse ? 4UL : 3UL); else num_channels=(image->matte != MagickFalse ? 5UL : 4UL); } (void) WriteBlobMSBShort(image,(unsigned short) num_channels); (void) WriteBlobMSBLong(image,(unsigned int) image->rows); (void) WriteBlobMSBLong(image,(unsigned int) image->columns); if (IsGrayImage(image,&image->exception) != MagickFalse) { MagickBooleanType monochrome; /* Write depth & mode. */ monochrome=IsMonochromeImage(image,&image->exception) && (image->depth == 1) ? MagickTrue : MagickFalse; (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void) TransformImageColorspace(image,sRGBColorspace); (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace); (void) WriteBlobMSBShort(image,CMYKMode); } } if ((IsGrayImage(image,&image->exception) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void) WriteBlobMSBLong(image,0); else { /* Write PSD raster colormap. 
*/ (void) WriteBlobMSBLong(image,768); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].red)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar( image->colormap[i].green)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].blue)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); } /* Image resource block. */ length=28; /* 0x03EB */ bim_profile=(StringInfo *) GetImageProfile(image,"8bim"); icc_profile=GetImageProfile(image,"icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile=CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length+=PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *) NULL) length+=PSDQuantum(GetStringInfoLength(icc_profile))+12; (void) WriteBlobMSBLong(image,(unsigned int) length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void) WriteBlob(image,GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile=DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x0000040F); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength( icc_profile)); (void) WriteBlob(image,GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void) WriteBlobByte(image,0); } base_image=GetNextImageInList(image); if (base_image == (Image *)NULL) base_image=image; size=0; size_offset=TellBlob(image); (void) SetPSDSize(&psd_info,image,0); (void) SetPSDSize(&psd_info,image,0); layer_count=0; for (next_image=base_image; next_image != NULL; ) { layer_count++; next_image=GetNextImageInList(next_image); } if (image->matte != MagickFalse) size+=WriteBlobMSBShort(image,-(unsigned short) layer_count); else size+=WriteBlobMSBShort(image,(unsigned short) layer_count); layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count,sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); layer_index=0; for (next_image=base_image; next_image != NULL; ) { Image *mask; unsigned char default_color; unsigned short channels, total_channels; mask=(Image *) NULL; property=GetImageArtifact(next_image,"psd:opacity-mask"); default_color=0; if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, &image->exception); default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0); } size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.y); size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.x); size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.y+ next_image->rows)); size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.x+ next_image->columns)); channels=1; if ((next_image->storage_class != PseudoClass) && (IsGrayImage(next_image,&next_image->exception) == MagickFalse)) channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 
4 : 3); total_channels=channels; if (next_image->matte != MagickFalse) total_channels++; if (mask != (Image *) NULL) total_channels++; size+=WriteBlobMSBShort(image,total_channels); layer_size_offsets[layer_index++]=TellBlob(image); for (i=0; i < (ssize_t) channels; i++) size+=WriteChannelSize(&psd_info,image,(signed short) i); if (next_image->matte != MagickFalse) size+=WriteChannelSize(&psd_info,image,-1); if (mask != (Image *) NULL) size+=WriteChannelSize(&psd_info,image,-2); size+=WriteBlob(image,4,(const unsigned char *) "8BIM"); size+=WriteBlob(image,4,(const unsigned char *) CompositeOperatorToPSDBlendMode(next_image)); property=GetImageArtifact(next_image,"psd:layer.opacity"); if (property != (const char *) NULL) { Quantum opacity; opacity=(Quantum) StringToInteger(property); size+=WriteBlobByte(image,ScaleQuantumToChar(opacity)); (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue, &image->exception); } else size+=WriteBlobByte(image,255); size+=WriteBlobByte(image,0); size+=WriteBlobByte(image,(unsigned char) (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */ size+=WriteBlobByte(image,0); info=GetAdditionalInformation(image_info,next_image); property=(const char *) GetImageProperty(next_image,"label"); if (property == (const char *) NULL) { (void) FormatLocaleString(layer_name,MaxTextExtent,"L%.20g", (double) layer_index); property=layer_name; } name_length=strlen(property)+1; if ((name_length % 4) != 0) name_length+=(4-(name_length % 4)); if (info != (const StringInfo *) NULL) name_length+=GetStringInfoLength(info); name_length+=8; if (mask != (Image *) NULL) name_length+=20; size+=WriteBlobMSBLong(image,(unsigned int) name_length); if (mask == (Image *) NULL) size+=WriteBlobMSBLong(image,0); else { if (mask->compose != NoCompositeOp) (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum( default_color),MagickTrue,&image->exception); mask->page.y+=image->page.y; mask->page.x+=image->page.x; size+=WriteBlobMSBLong(image,20); size+=WriteBlobMSBSignedLong(image,(const signed int) mask->page.y); size+=WriteBlobMSBSignedLong(image,(const signed int) mask->page.x); size+=WriteBlobMSBSignedLong(image,(const signed int) (mask->rows+ mask->page.y)); size+=WriteBlobMSBSignedLong(image,(const signed int) (mask->columns+ mask->page.x)); size+=WriteBlobByte(image,default_color); size+=WriteBlobByte(image,(unsigned char) ( mask->compose == NoCompositeOp ? 2 : 0)); size+=WriteBlobMSBShort(image,0); } size+=WriteBlobMSBLong(image,0); size+=WritePascalString(image,property,4); if (info != (const StringInfo *) NULL) size+=WriteBlob(image,GetStringInfoLength(info),GetStringInfoDatum(info)); next_image=GetNextImageInList(next_image); } /* Now the image data! */ next_image=base_image; layer_index=0; while (next_image != NULL) { length=(size_t) WritePSDChannels(&psd_info,image_info,image,next_image, layer_size_offsets[layer_index++],MagickTrue); if (length == 0) { status=MagickFalse; break; } size+=length; next_image=GetNextImageInList(next_image); } (void) WriteBlobMSBLong(image,0); /* user mask data */ /* Remove the opacity mask from the registry */ next_image=base_image; while (next_image != (Image *) NULL) { property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) (void) DeleteImageRegistry(property); next_image=GetNextImageInList(next_image); } /* Write the total size */ size_offset+=WritePSDSize(&psd_info,image,size+ (psd_info.version == 1 ? 
8 : 12),size_offset); if ((size/2) != ((size+1)/2)) rounded_size=size+1; else rounded_size=size; (void) WritePSDSize(&psd_info,image,rounded_size,size_offset); layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory( layer_size_offsets); /* Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression=image->compression; if (image_info->compression != UndefinedCompression) image->compression=image_info->compression; if (image->compression == ZipCompression) image->compression=RLECompression; if (WritePSDChannels(&psd_info,image_info,image,image,0, MagickFalse) == 0) status=MagickFalse; image->compression=compression; } (void) CloseBlob(image); return(status); }
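The coder above is normally reached indirectly, through ReadImage()/WriteImage() dispatching on the registered "PSD"/"PSB" formats. A minimal sketch of driving it from a standalone program — assuming the ImageMagick 6 MagickCore headers and library are installed, ignoring most error handling, and using the placeholder file names input.psd and flat.png:

```c
#include <stdio.h>
#include <magick/MagickCore.h>

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *flattened,
    *images;

  ImageInfo
    *image_info;

  (void) argc;
  MagickCoreGenesis(argv[0],MagickFalse);
  exception=AcquireExceptionInfo();
  image_info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename,"input.psd",MaxTextExtent);
  images=ReadImage(image_info,exception);  /* dispatches to ReadPSDImage */
  if (images == (Image *) NULL)
    {
      CatchException(exception);
      exception=DestroyExceptionInfo(exception);
      image_info=DestroyImageInfo(image_info);
      MagickCoreTerminus();
      return(1);
    }
  /*
    Collapse the layer list into one composite, mirroring what the coder
    itself does when no merged image is stored in the file.
  */
  flattened=MergeImageLayers(images,FlattenLayer,exception);
  if (flattened != (Image *) NULL)
    {
      (void) CopyMagickString(flattened->filename,"flat.png",MaxTextExtent);
      (void) WriteImage(image_info,flattened);
      flattened=DestroyImage(flattened);
    }
  images=DestroyImageList(images);
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}
```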
pl10-1.c
#include <omp.h>
#include <stdio.h>

int main() {
  double T1 = omp_get_wtime();
#pragma omp parallel
  {
    int tid = omp_get_thread_num();
    printf("Hello, world! from thread %d\n", tid);
    if (tid == 0) {
      printf("cores: %d\n", omp_get_num_procs());
      printf("threads: %d\n", omp_get_num_threads());
    }
  }
  double T2 = omp_get_wtime();
  /* omp_get_wtime() returns seconds as a double, so the elapsed time must be
     printed with %f, not %d. */
  printf("That's all, folks! Time %f\n", T2 - T1);
  return 0;
}
GB_binop__ne_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ne_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__ne_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__ne_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__ne_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_uint64) // A*D function (colscale): GB (_AxD__ne_uint64) // D*A function (rowscale): GB (_DxB__ne_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__ne_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__ne_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_uint64) // C=scalar+B GB (_bind1st__ne_uint64) // C=scalar+B' GB (_bind1st_tran__ne_uint64) // C=A+scalar GB (_bind2nd__ne_uint64) // C=A'+scalar GB (_bind2nd_tran__ne_uint64) // C type: bool // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = (aij != bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x != y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_NE || GxB_NO_UINT64 || GxB_NO_NE_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__ne_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ne_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ne_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__ne_uint64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__ne_uint64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ne_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__ne_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ne_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__ne_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ne_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ne_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = (x != bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ne_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij != y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x != aij) ; \ } GrB_Info GB (_bind1st_tran__ne_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) 
{ // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij != y) ; \ } GrB_Info GB (_bind2nd_tran__ne_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
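
/*
  Illustrative sketch (not part of the generated GraphBLAS kernels above):
  the "bind2nd" pattern those kernels implement, written without the GB_*
  macros.  The names below are invented for illustration only.  Ab is an
  optional bitmap marking which entries of Ax are present, mirroring the
  GBB (Ab, p) test used in GB (_bind2nd__ne_uint64).
*/

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

void bind2nd_ne_uint64_sketch
(
    bool *Cx,                   // output: Cx [p] = (Ax [p] != y)
    const uint64_t *Ax,         // input values
    const int8_t *Ab,           // optional bitmap; NULL means all present
    size_t anz,                 // number of entries in Ax
    uint64_t y                  // scalar bound as the 2nd operand
)
{
    for (size_t p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   // skip entries not present
        Cx [p] = (Ax [p] != y) ;
    }
}
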
enhance.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE N N H H AAA N N CCCC EEEEE % % E NN N H H A A NN N C E % % EEE N N N HHHHH AAAAA N N N C EEE % % E N NN H H A A N NN C E % % EEEEE N N H H A A N N CCCC EEEEE % % % % % % MagickCore Image Enhancement Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/accelerate-private.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/opencl.h" #include "magick/opencl-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resource_.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/token.h" #include "magick/xml-tree.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoGammaImage() extract the 'mean' from the image and adjust the image % to try make set its gamma appropriatally. % % The format of the AutoGammaImage method is: % % MagickBooleanType AutoGammaImage(Image *image) % MagickBooleanType AutoGammaImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: The image to auto-level % % o channel: The channels to auto-level. If the special 'SyncChannels' % flag is set all given channels is adjusted in the same way using the % mean average of those channels. 
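%
%  As a worked example of the mean-to-gamma mapping used below: if a channel
%  mean is QuantumRange/4, then gamma = log(0.25)/log(0.5) = 2.0, and the
%  subsequent LevelImageChannel() call uses that gamma to pull the channel
%  mean toward mid-gray.
%
%  A minimal usage sketch (illustrative only; 'image' is assumed to be a
%  previously acquired Image; check the MagickBooleanType return value and
%  image->exception on failure):
%
%     (void) AutoGammaImage(image);
%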
% */ MagickExport MagickBooleanType AutoGammaImage(Image *image) { return(AutoGammaImageChannel(image,DefaultChannels)); } MagickExport MagickBooleanType AutoGammaImageChannel(Image *image, const ChannelType channel) { double gamma, mean, logmean, sans; MagickStatusType status; logmean=log(0.5); if ((channel & SyncChannels) != 0) { /* Apply gamma correction equally accross all given channels */ (void) GetImageChannelMean(image,channel,&mean,&sans,&image->exception); gamma=log(mean*QuantumScale)/logmean; return(LevelImageChannel(image,channel,0.0,(double) QuantumRange,gamma)); } /* Auto-gamma each channel separateally */ status = MagickTrue; if ((channel & RedChannel) != 0) { (void) GetImageChannelMean(image,RedChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status&=LevelImageChannel(image,RedChannel,0.0,(double) QuantumRange, gamma); } if ((channel & GreenChannel) != 0) { (void) GetImageChannelMean(image,GreenChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status&=LevelImageChannel(image,GreenChannel,0.0,(double) QuantumRange, gamma); } if ((channel & BlueChannel) != 0) { (void) GetImageChannelMean(image,BlueChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status&=LevelImageChannel(image,BlueChannel,0.0,(double) QuantumRange, gamma); } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { (void) GetImageChannelMean(image,OpacityChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status&=LevelImageChannel(image,OpacityChannel,0.0,(double) QuantumRange, gamma); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { (void) GetImageChannelMean(image,IndexChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status&=LevelImageChannel(image,IndexChannel,0.0,(double) QuantumRange, gamma); } return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoLevelImage() adjusts the levels of a particular image channel by % scaling the minimum and maximum values to the full quantum range. % % The format of the LevelImage method is: % % MagickBooleanType AutoLevelImage(Image *image) % MagickBooleanType AutoLevelImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: The image to auto-level % % o channel: The channels to auto-level. If the special 'SyncChannels' % flag is set the min/max/mean value of all given channels is used for % all given channels, to all channels in the same way. % */ MagickExport MagickBooleanType AutoLevelImage(Image *image) { return(AutoLevelImageChannel(image,DefaultChannels)); } MagickExport MagickBooleanType AutoLevelImageChannel(Image *image, const ChannelType channel) { /* Convenience method for a min/max histogram stretch. */ return(MinMaxStretchImage(image,channel,0.0,0.0)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B r i g h t n e s s C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BrightnessContrastImage() changes the brightness and/or contrast of an % image. 
It converts the brightness and contrast parameters into slope and % intercept and calls a polynomical function to apply to the image. % % The format of the BrightnessContrastImage method is: % % MagickBooleanType BrightnessContrastImage(Image *image, % const double brightness,const double contrast) % MagickBooleanType BrightnessContrastImageChannel(Image *image, % const ChannelType channel,const double brightness, % const double contrast) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o brightness: the brightness percent (-100 .. 100). % % o contrast: the contrast percent (-100 .. 100). % */ MagickExport MagickBooleanType BrightnessContrastImage(Image *image, const double brightness,const double contrast) { MagickBooleanType status; status=BrightnessContrastImageChannel(image,DefaultChannels,brightness, contrast); return(status); } MagickExport MagickBooleanType BrightnessContrastImageChannel(Image *image, const ChannelType channel,const double brightness,const double contrast) { #define BrightnessContastImageTag "BrightnessContast/Image" double alpha, intercept, coefficients[2], slope; MagickBooleanType status; /* Compute slope and intercept. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); alpha=contrast; slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0)); if (slope < 0.0) slope=0.0; intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope); coefficients[0]=slope; coefficients[1]=intercept; status=FunctionImageChannel(image,channel,PolynomialFunction,2,coefficients, &image->exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r D e c i s i o n L i s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorDecisionListImage() accepts a lightweight Color Correction Collection % (CCC) file which solely contains one or more color corrections and applies % the correction to the image. Here is a sample CCC file: % % <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2"> % <ColorCorrection id="cc03345"> % <SOPNode> % <Slope> 0.9 1.2 0.5 </Slope> % <Offset> 0.4 -0.5 0.6 </Offset> % <Power> 1.0 0.8 1.5 </Power> % </SOPNode> % <SATNode> % <Saturation> 0.85 </Saturation> % </SATNode> % </ColorCorrection> % </ColorCorrectionCollection> % % which includes the slop, offset, and power for each of the RGB channels % as well as the saturation. % % The format of the ColorDecisionListImage method is: % % MagickBooleanType ColorDecisionListImage(Image *image, % const char *color_correction_collection) % % A description of each parameter follows: % % o image: the image. % % o color_correction_collection: the color correction collection in XML. 
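%
%  Per channel the correction built below is the ASC-CDL slope/offset/power
%  transfer
%
%     out = (in*slope + offset)^power
%
%  evaluated through a lookup table over the quantum range, followed by the
%  saturation step
%
%     result = luma + saturation*(out - luma)
%
%  where luma is the weighted sum 0.212656*R + 0.715158*G + 0.072186*B used
%  in the implementation.
%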
% */ MagickExport MagickBooleanType ColorDecisionListImage(Image *image, const char *color_correction_collection) { #define ColorDecisionListCorrectImageTag "ColorDecisionList/Image" typedef struct _Correction { double slope, offset, power; } Correction; typedef struct _ColorCorrection { Correction red, green, blue; double saturation; } ColorCorrection; CacheView *image_view; char token[MaxTextExtent]; ColorCorrection color_correction; const char *content, *p; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; PixelPacket *cdl_map; ssize_t i; ssize_t y; XMLTreeInfo *cc, *ccc, *sat, *sop; /* Allocate and initialize cdl maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (color_correction_collection == (const char *) NULL) return(MagickFalse); exception=(&image->exception); ccc=NewXMLTree((const char *) color_correction_collection,&image->exception); if (ccc == (XMLTreeInfo *) NULL) return(MagickFalse); cc=GetXMLTreeChild(ccc,"ColorCorrection"); if (cc == (XMLTreeInfo *) NULL) { ccc=DestroyXMLTree(ccc); return(MagickFalse); } color_correction.red.slope=1.0; color_correction.red.offset=0.0; color_correction.red.power=1.0; color_correction.green.slope=1.0; color_correction.green.offset=0.0; color_correction.green.power=1.0; color_correction.blue.slope=1.0; color_correction.blue.offset=0.0; color_correction.blue.power=1.0; color_correction.saturation=0.0; sop=GetXMLTreeChild(cc,"SOPNode"); if (sop != (XMLTreeInfo *) NULL) { XMLTreeInfo *offset, *power, *slope; slope=GetXMLTreeChild(sop,"Slope"); if (slope != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(slope); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MaxTextExtent,token); switch (i) { case 0: { color_correction.red.slope=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.slope=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.slope=StringToDouble(token, (char **) NULL); break; } } } } offset=GetXMLTreeChild(sop,"Offset"); if (offset != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(offset); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MaxTextExtent,token); switch (i) { case 0: { color_correction.red.offset=StringToDouble(token, (char **) NULL); break; } case 1: { color_correction.green.offset=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.offset=StringToDouble(token, (char **) NULL); break; } } } } power=GetXMLTreeChild(sop,"Power"); if (power != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(power); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MaxTextExtent,token); switch (i) { case 0: { color_correction.red.power=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.power=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.power=StringToDouble(token, (char **) NULL); break; } } } } } sat=GetXMLTreeChild(cc,"SATNode"); if (sat != (XMLTreeInfo *) NULL) { XMLTreeInfo *saturation; saturation=GetXMLTreeChild(sat,"Saturation"); if (saturation != (XMLTreeInfo *) NULL) { 
content=GetXMLTreeContent(saturation); p=(const char *) content; (void) GetNextToken(p,&p,MaxTextExtent,token); color_correction.saturation=StringToDouble(token,(char **) NULL); } } ccc=DestroyXMLTree(ccc); if (image->debug != MagickFalse) { (void) LogMagickEvent(TransformEvent,GetMagickModule(), " Color Correction Collection:"); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.slope: %g",color_correction.red.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.offset: %g",color_correction.red.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.power: %g",color_correction.red.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.slope: %g",color_correction.green.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.offset: %g",color_correction.green.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.power: %g",color_correction.green.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.slope: %g",color_correction.blue.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.offset: %g",color_correction.blue.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.power: %g",color_correction.blue.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.saturation: %g",color_correction.saturation); } cdl_map=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map)); if (cdl_map == (PixelPacket *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); for (i=0; i <= (ssize_t) MaxMap; i++) { cdl_map[i].red=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+ color_correction.red.offset,color_correction.red.power))))); cdl_map[i].green=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+ color_correction.green.offset,color_correction.green.power))))); cdl_map[i].blue=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+ color_correction.blue.offset,color_correction.blue.power))))); } if (image->storage_class == PseudoClass) { /* Apply transfer function to colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { double luma; luma=0.212656*image->colormap[i].red+0.715158*image->colormap[i].green+ 0.072186*image->colormap[i].blue; image->colormap[i].red=ClampToQuantum(luma+color_correction.saturation* cdl_map[ScaleQuantumToMap(image->colormap[i].red)].red-luma); image->colormap[i].green=ClampToQuantum(luma+ color_correction.saturation*cdl_map[ScaleQuantumToMap( image->colormap[i].green)].green-luma); image->colormap[i].blue=ClampToQuantum(luma+color_correction.saturation* cdl_map[ScaleQuantumToMap(image->colormap[i].blue)].blue-luma); } } /* Apply transfer function to image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double luma; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { luma=0.212656*GetPixelRed(q)+0.715158*GetPixelGreen(q)+ 0.072186*GetPixelBlue(q); SetPixelRed(q,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelRed(q))].red-luma))); SetPixelGreen(q,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelGreen(q))].green-luma))); SetPixelBlue(q,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelBlue(q))].blue-luma))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag, progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); cdl_map=(PixelPacket *) RelinquishMagickMemory(cdl_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l u t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClutImage() replaces each color value in the given image, by using it as an % index to lookup a replacement color value in a Color Look UP Table in the % form of an image. The values are extracted along a diagonal of the CLUT % image so either a horizontal or vertial gradient image can be used. % % Typically this is used to either re-color a gray-scale image according to a % color gradient in the CLUT image, or to perform a freeform histogram % (level) adjustment according to the (typically gray-scale) gradient in the % CLUT image. % % When the 'channel' mask includes the matte/alpha transparency channel but % one image has no such channel it is assumed that that image is a simple % gray-scale image that will effect the alpha channel values, either for % gray-scale coloring (with transparent or semi-transparent colors), or % a histogram adjustment of existing alpha channel values. If both images % have matte channels, direct and normal indexing is applied, which is rarely % used. % % The format of the ClutImage method is: % % MagickBooleanType ClutImage(Image *image,Image *clut_image) % MagickBooleanType ClutImageChannel(Image *image, % const ChannelType channel,Image *clut_image) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o clut_image: the color lookup table image for replacement color values. % % o channel: the channel. 
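%
%  A minimal usage sketch (illustrative only; both images are assumed to have
%  been acquired already and error handling is omitted).  The CLUT image is
%  typically a small gradient image whose values are sampled along its
%  diagonal, as described above:
%
%     (void) ClutImageChannel(image,DefaultChannels,clut_image);
%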
% */ MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image) { return(ClutImageChannel(image,DefaultChannels,clut_image)); } MagickExport MagickBooleanType ClutImageChannel(Image *image, const ChannelType channel,const Image *clut_image) { #define ClutImageTag "Clut/Image" CacheView *clut_view, *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket *clut_map; ssize_t i; ssize_t adjust, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(clut_image != (Image *) NULL); assert(clut_image->signature == MagickCoreSignature); exception=(&image->exception); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && (IsGrayColorspace(clut_image->colorspace) == MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace); clut_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*clut_map)); if (clut_map == (MagickPixelPacket *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Clut image. */ status=MagickTrue; progress=0; adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1); clut_view=AcquireAuthenticCacheView(clut_image,exception); for (i=0; i <= (ssize_t) MaxMap; i++) { GetMagickPixelPacket(clut_image,clut_map+i); status=InterpolateMagickPixelPacket(clut_image,clut_view, UndefinedInterpolatePixel,(double) i*(clut_image->columns-adjust)/MaxMap, (double) i*(clut_image->rows-adjust)/MaxMap,clut_map+i,exception); if (status == MagickFalse) break; } clut_view=DestroyCacheView(clut_view); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); GetMagickPixelPacket(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampPixelRed(clut_map+ ScaleQuantumToMap(GetPixelRed(q)))); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampPixelGreen(clut_map+ ScaleQuantumToMap(GetPixelGreen(q)))); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampPixelBlue(clut_map+ ScaleQuantumToMap(GetPixelBlue(q)))); if ((channel & OpacityChannel) != 0) { if (clut_image->matte == MagickFalse) SetPixelAlpha(q,MagickPixelIntensityToQuantum(clut_map+ ScaleQuantumToMap((Quantum) GetPixelAlpha(q)))); else if (image->matte == MagickFalse) SetPixelOpacity(q,ClampPixelOpacity(clut_map+ ScaleQuantumToMap((Quantum) MagickPixelIntensity(&pixel)))); else SetPixelOpacity(q,ClampPixelOpacity( clut_map+ScaleQuantumToMap(GetPixelOpacity(q)))); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum((clut_map+(ssize_t) GetPixelIndex(indexes+x))->index)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if 
(image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ClutImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); clut_map=(MagickPixelPacket *) RelinquishMagickMemory(clut_map); if ((clut_image->matte != MagickFalse) && ((channel & OpacityChannel) != 0)) (void) SetImageAlphaChannel(image,ActivateAlphaChannel); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ContrastImage() enhances the intensity differences between the lighter and % darker elements of the image. Set sharpen to a MagickTrue to increase the % image contrast otherwise the contrast is reduced. % % The format of the ContrastImage method is: % % MagickBooleanType ContrastImage(Image *image, % const MagickBooleanType sharpen) % % A description of each parameter follows: % % o image: the image. % % o sharpen: Increase or decrease image contrast. % */ static void Contrast(const int sign,Quantum *red,Quantum *green,Quantum *blue) { double brightness, hue, saturation; /* Enhance contrast: dark color become darker, light color become lighter. */ assert(red != (Quantum *) NULL); assert(green != (Quantum *) NULL); assert(blue != (Quantum *) NULL); hue=0.0; saturation=0.0; brightness=0.0; ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)- brightness); if (brightness > 1.0) brightness=1.0; else if (brightness < 0.0) brightness=0.0; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } MagickExport MagickBooleanType ContrastImage(Image *image, const MagickBooleanType sharpen) { #define ContrastImageTag "Contrast/Image" CacheView *image_view; ExceptionInfo *exception; int sign; MagickBooleanType status; MagickOffsetType progress; ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); sign=sharpen != MagickFalse ? 1 : -1; if (image->storage_class == PseudoClass) { /* Contrast enhance colormap. */ for (i=0; i < (ssize_t) image->colors; i++) Contrast(sign,&image->colormap[i].red,&image->colormap[i].green, &image->colormap[i].blue); } /* Contrast enhance image. 
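
    As a worked example of the Contrast() adjustment above: with sharpen set
    (sign = +1) an HSB brightness of 0.25 maps to about 0.20 and a brightness
    of 0.75 maps to about 0.80, so values below mid-gray move down and values
    above move up; with sign = -1 the same offsets are applied with the
    opposite sign, pulling values toward mid-gray and reducing contrast.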
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) status=AccelerateContrastImage(image,sharpen,&image->exception); if (status != MagickFalse) return status; #endif status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum blue, green, red; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=GetPixelRed(q); green=GetPixelGreen(q); blue=GetPixelBlue(q); Contrast(sign,&red,&green,&blue); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ContrastImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n t r a s t S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ContrastStretchImage() is a simple image enhancement technique that attempts % to improve the contrast in an image by `stretching' the range of intensity % values it contains to span a desired range of values. It differs from the % more sophisticated histogram equalization in that it can only apply a % linear scaling function to the image pixel values. As a result the % `enhancement' is less harsh. % % The format of the ContrastStretchImage method is: % % MagickBooleanType ContrastStretchImage(Image *image, % const char *levels) % MagickBooleanType ContrastStretchImageChannel(Image *image, % const size_t channel,const double black_point, % const double white_point) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o black_point: the black point. % % o white_point: the white point. % % o levels: Specify the levels where the black and white points have the % range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.). % */ MagickExport MagickBooleanType ContrastStretchImage(Image *image, const char *levels) { double black_point = 0.0, white_point = (double) image->columns*image->rows; GeometryInfo geometry_info; MagickBooleanType status; MagickStatusType flags; /* Parse levels. 
*/ if (levels == (char *) NULL) return(MagickFalse); flags=ParseGeometry(levels,&geometry_info); if ((flags & RhoValue) != 0) black_point=geometry_info.rho; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; if ((flags & PercentValue) != 0) { black_point*=(double) QuantumRange/100.0; white_point*=(double) QuantumRange/100.0; } if ((flags & SigmaValue) == 0) white_point=(double) image->columns*image->rows-black_point; status=ContrastStretchImageChannel(image,DefaultChannels,black_point, white_point); return(status); } MagickExport MagickBooleanType ContrastStretchImageChannel(Image *image, const ChannelType channel,const double black_point,const double white_point) { #define MaxRange(color) ((MagickRealType) ScaleQuantumToMap((Quantum) (color))) #define ContrastStretchImageTag "ContrastStretch/Image" CacheView *image_view; double intensity; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket black, *histogram, white; QuantumPixelPacket *stretch_map; ssize_t i; ssize_t y; /* Allocate histogram and stretch map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=(&image->exception); #if defined(MAGICKCORE_OPENCL_SUPPORT) && 0 /* Call OpenCL version */ status=AccelerateContrastStretchImageChannel(image,channel,black_point, white_point,&image->exception); if (status != MagickFalse) return status; #endif histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram)); stretch_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*stretch_map)); if ((histogram == (MagickPixelPacket *) NULL) || (stretch_map == (QuantumPixelPacket *) NULL)) { if (stretch_map != (QuantumPixelPacket *) NULL) stretch_map=(QuantumPixelPacket *) RelinquishMagickMemory(stretch_map); if (histogram != (MagickPixelPacket *) NULL) histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Form histogram. 
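
    The histogram is then used to locate the stretch boundaries: black_point
    and white_point are pixel counts, the cumulative histogram is walked up
    from the dark end until it exceeds black_point (giving the black level)
    and down from the light end until it exceeds columns*rows-white_point
    (giving the white level), and stretch_map then maps [black,white]
    linearly onto [0,QuantumRange] for each selected channel.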
*/ if (SetImageGray(image,exception) != MagickFalse) (void) SetImageColorspace(image,GRAYColorspace); status=MagickTrue; (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram)); image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const PixelPacket *magick_restrict p; IndexPacket *magick_restrict indexes; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); if ((channel & SyncChannels) != 0) for (x=0; x < (ssize_t) image->columns; x++) { Quantum intensity; intensity=ClampToQuantum(GetPixelIntensity(image,p)); histogram[ScaleQuantumToMap(intensity)].red++; histogram[ScaleQuantumToMap(intensity)].green++; histogram[ScaleQuantumToMap(intensity)].blue++; histogram[ScaleQuantumToMap(intensity)].index++; p++; } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) histogram[ScaleQuantumToMap(GetPixelRed(p))].red++; if ((channel & GreenChannel) != 0) histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++; if ((channel & BlueChannel) != 0) histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++; if ((channel & OpacityChannel) != 0) histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++; p++; } } /* Find the histogram boundaries by locating the black/white levels. */ black.red=0.0; white.red=MaxRange(QuantumRange); if ((channel & RedChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].red; if (intensity > black_point) break; } black.red=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].red; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.red=(MagickRealType) i; } black.green=0.0; white.green=MaxRange(QuantumRange); if ((channel & GreenChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].green; if (intensity > black_point) break; } black.green=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].green; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.green=(MagickRealType) i; } black.blue=0.0; white.blue=MaxRange(QuantumRange); if ((channel & BlueChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].blue; if (intensity > black_point) break; } black.blue=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].blue; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.blue=(MagickRealType) i; } black.opacity=0.0; white.opacity=MaxRange(QuantumRange); if ((channel & OpacityChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].opacity; if (intensity > black_point) break; } black.opacity=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].opacity; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.opacity=(MagickRealType) i; } black.index=0.0; white.index=MaxRange(QuantumRange); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { 
intensity+=histogram[i].index; if (intensity > black_point) break; } black.index=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].index; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.index=(MagickRealType) i; } histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); /* Stretch the histogram to create the stretched image mapping. */ (void) memset(stretch_map,0,(MaxMap+1)*sizeof(*stretch_map)); for (i=0; i <= (ssize_t) MaxMap; i++) { if ((channel & RedChannel) != 0) { if (i < (ssize_t) black.red) stretch_map[i].red=(Quantum) 0; else if (i > (ssize_t) white.red) stretch_map[i].red=QuantumRange; else if (black.red != white.red) stretch_map[i].red=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.red)/(white.red-black.red))); } if ((channel & GreenChannel) != 0) { if (i < (ssize_t) black.green) stretch_map[i].green=0; else if (i > (ssize_t) white.green) stretch_map[i].green=QuantumRange; else if (black.green != white.green) stretch_map[i].green=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.green)/(white.green-black.green))); } if ((channel & BlueChannel) != 0) { if (i < (ssize_t) black.blue) stretch_map[i].blue=0; else if (i > (ssize_t) white.blue) stretch_map[i].blue= QuantumRange; else if (black.blue != white.blue) stretch_map[i].blue=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.blue)/(white.blue-black.blue))); } if ((channel & OpacityChannel) != 0) { if (i < (ssize_t) black.opacity) stretch_map[i].opacity=0; else if (i > (ssize_t) white.opacity) stretch_map[i].opacity=QuantumRange; else if (black.opacity != white.opacity) stretch_map[i].opacity=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.opacity)/(white.opacity-black.opacity))); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if (i < (ssize_t) black.index) stretch_map[i].index=0; else if (i > (ssize_t) white.index) stretch_map[i].index=QuantumRange; else if (black.index != white.index) stretch_map[i].index=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.index)/(white.index-black.index))); } } /* Stretch the image. */ if (((channel & OpacityChannel) != 0) || (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))) image->storage_class=DirectClass; if (image->storage_class == PseudoClass) { /* Stretch colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) { if (black.red != white.red) image->colormap[i].red=stretch_map[ ScaleQuantumToMap(image->colormap[i].red)].red; } if ((channel & GreenChannel) != 0) { if (black.green != white.green) image->colormap[i].green=stretch_map[ ScaleQuantumToMap(image->colormap[i].green)].green; } if ((channel & BlueChannel) != 0) { if (black.blue != white.blue) image->colormap[i].blue=stretch_map[ ScaleQuantumToMap(image->colormap[i].blue)].blue; } if ((channel & OpacityChannel) != 0) { if (black.opacity != white.opacity) image->colormap[i].opacity=stretch_map[ ScaleQuantumToMap(image->colormap[i].opacity)].opacity; } } } /* Stretch image. 
*/ status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) { if (black.red != white.red) SetPixelRed(q,stretch_map[ ScaleQuantumToMap(GetPixelRed(q))].red); } if ((channel & GreenChannel) != 0) { if (black.green != white.green) SetPixelGreen(q,stretch_map[ ScaleQuantumToMap(GetPixelGreen(q))].green); } if ((channel & BlueChannel) != 0) { if (black.blue != white.blue) SetPixelBlue(q,stretch_map[ ScaleQuantumToMap(GetPixelBlue(q))].blue); } if ((channel & OpacityChannel) != 0) { if (black.opacity != white.opacity) SetPixelOpacity(q,stretch_map[ ScaleQuantumToMap(GetPixelOpacity(q))].opacity); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if (black.index != white.index) SetPixelIndex(indexes+x,stretch_map[ ScaleQuantumToMap(GetPixelIndex(indexes+x))].index); } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ContrastStretchImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); stretch_map=(QuantumPixelPacket *) RelinquishMagickMemory(stretch_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E n h a n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EnhanceImage() applies a digital filter that improves the quality of a % noisy image. % % The format of the EnhanceImage method is: % % Image *EnhanceImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
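%
%  The filter below works on a 5x5 neighborhood of each pixel: every neighbor
%  is compared with the center pixel using a mean-weighted squared color
%  distance (per-channel differences weighted by 4.0+mean for red, 7.0-mean
%  for green, and 5.0-mean for blue and opacity), and only neighbors whose
%  distance falls below a fixed threshold (0.069) contribute to a weighted
%  average with weights 5 8 10 8 5 / 8 20 40 20 8 / 10 40 80 40 10, mirrored
%  in the lower rows.  Dissimilar neighbors are excluded, so edges are
%  preserved while similar regions are smoothed.
%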
% */ MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception) { #define EnhancePixel(weight) \ mean=QuantumScale*((double) GetPixelRed(r)+pixel.red)/2.0; \ distance=QuantumScale*((double) GetPixelRed(r)-pixel.red); \ distance_squared=(4.0+mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelGreen(r)+pixel.green)/2.0; \ distance=QuantumScale*((double) GetPixelGreen(r)-pixel.green); \ distance_squared+=(7.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelBlue(r)+pixel.blue)/2.0; \ distance=QuantumScale*((double) GetPixelBlue(r)-pixel.blue); \ distance_squared+=(5.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelOpacity(r)+pixel.opacity)/2.0; \ distance=QuantumScale*((double) GetPixelOpacity(r)-pixel.opacity); \ distance_squared+=(5.0-mean)*distance*distance; \ if (distance_squared < 0.069) \ { \ aggregate.red+=(weight)*GetPixelRed(r); \ aggregate.green+=(weight)*GetPixelGreen(r); \ aggregate.blue+=(weight)*GetPixelBlue(r); \ aggregate.opacity+=(weight)*GetPixelOpacity(r); \ total_weight+=(weight); \ } \ r++; #define EnhanceImageTag "Enhance/Image" CacheView *enhance_view, *image_view; Image *enhance_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; ssize_t y; /* Initialize enhanced image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((image->columns < 5) || (image->rows < 5)) return((Image *) NULL); enhance_image=CloneImage(image,0,0,MagickTrue,exception); if (enhance_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(enhance_image,DirectClass) == MagickFalse) { InheritException(exception,&enhance_image->exception); enhance_image=DestroyImage(enhance_image); return((Image *) NULL); } /* Enhance image. */ status=MagickTrue; progress=0; (void) memset(&zero,0,sizeof(zero)); image_view=AcquireAuthenticCacheView(image,exception); enhance_view=AcquireAuthenticCacheView(enhance_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,enhance_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const PixelPacket *magick_restrict p; PixelPacket *magick_restrict q; ssize_t x; /* Read another scan line. */ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception); q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double distance, distance_squared, mean, total_weight; MagickPixelPacket aggregate; PixelPacket pixel; const PixelPacket *magick_restrict r; /* Compute weighted average of target pixel color components. 
*/ aggregate=zero; total_weight=0.0; r=p+2*(image->columns+4)+2; pixel=(*r); r=p; EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0); EnhancePixel(8.0); EnhancePixel(5.0); r=p+(image->columns+4); EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0); EnhancePixel(20.0); EnhancePixel(8.0); r=p+2*(image->columns+4); EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0); EnhancePixel(40.0); EnhancePixel(10.0); r=p+3*(image->columns+4); EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0); EnhancePixel(20.0); EnhancePixel(8.0); r=p+4*(image->columns+4); EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0); EnhancePixel(8.0); EnhancePixel(5.0); if (total_weight > MagickEpsilon) { SetPixelRed(q,(aggregate.red+(total_weight/2)-1)/total_weight); SetPixelGreen(q,(aggregate.green+(total_weight/2)-1)/total_weight); SetPixelBlue(q,(aggregate.blue+(total_weight/2)-1)/total_weight); SetPixelOpacity(q,(aggregate.opacity+(total_weight/2)-1)/ total_weight); } p++; q++; } if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,EnhanceImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } enhance_view=DestroyCacheView(enhance_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) enhance_image=DestroyImage(enhance_image); return(enhance_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E q u a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EqualizeImage() applies a histogram equalization to the image. % % The format of the EqualizeImage method is: % % MagickBooleanType EqualizeImage(Image *image) % MagickBooleanType EqualizeImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % */ MagickExport MagickBooleanType EqualizeImage(Image *image) { return(EqualizeImageChannel(image,DefaultChannels)); } MagickExport MagickBooleanType EqualizeImageChannel(Image *image, const ChannelType channel) { #define EqualizeImageTag "Equalize/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket black, *histogram, intensity, *map, white; QuantumPixelPacket *equalize_map; ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=(&image->exception); #if defined(MAGICKCORE_OPENCL_SUPPORT) /* Call OpenCL version */ status=AccelerateEqualizeImage(image,channel,&image->exception); if (status != MagickFalse) return status; #endif /* Allocate and initialize histogram arrays. 
*/ equalize_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*equalize_map)); histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram)); map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*map)); if ((equalize_map == (QuantumPixelPacket *) NULL) || (histogram == (MagickPixelPacket *) NULL) || (map == (MagickPixelPacket *) NULL)) { if (map != (MagickPixelPacket *) NULL) map=(MagickPixelPacket *) RelinquishMagickMemory(map); if (histogram != (MagickPixelPacket *) NULL) histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); if (equalize_map != (QuantumPixelPacket *) NULL) equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory( equalize_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Form histogram. */ (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const IndexPacket *magick_restrict indexes; const PixelPacket *magick_restrict p; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); if ((channel & SyncChannels) != 0) for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType intensity=GetPixelIntensity(image,p); histogram[ScaleQuantumToMap(ClampToQuantum(intensity))].red++; p++; } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) histogram[ScaleQuantumToMap(GetPixelRed(p))].red++; if ((channel & GreenChannel) != 0) histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++; if ((channel & BlueChannel) != 0) histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++; if ((channel & OpacityChannel) != 0) histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++; p++; } } image_view=DestroyCacheView(image_view); /* Integrate the histogram to get the equalization map. 
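
    map[i] below is the cumulative histogram (the discrete CDF); with
    black=map[0] and white=map[MaxMap], each input level i is replaced by
    ScaleMapToQuantum(MaxMap*(map[i]-black)/(white-black)), so a level whose
    cumulative count lies halfway between black and white maps to the middle
    of the quantum range and the output histogram is flattened.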
*/ (void) memset(&intensity,0,sizeof(intensity)); for (i=0; i <= (ssize_t) MaxMap; i++) { if ((channel & SyncChannels) != 0) { intensity.red+=histogram[i].red; map[i]=intensity; continue; } if ((channel & RedChannel) != 0) intensity.red+=histogram[i].red; if ((channel & GreenChannel) != 0) intensity.green+=histogram[i].green; if ((channel & BlueChannel) != 0) intensity.blue+=histogram[i].blue; if ((channel & OpacityChannel) != 0) intensity.opacity+=histogram[i].opacity; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) intensity.index+=histogram[i].index; map[i]=intensity; } black=map[0]; white=map[(int) MaxMap]; (void) memset(equalize_map,0,(MaxMap+1)*sizeof(*equalize_map)); for (i=0; i <= (ssize_t) MaxMap; i++) { if ((channel & SyncChannels) != 0) { if (white.red != black.red) equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].red-black.red))/(white.red-black.red))); continue; } if (((channel & RedChannel) != 0) && (white.red != black.red)) equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].red-black.red))/(white.red-black.red))); if (((channel & GreenChannel) != 0) && (white.green != black.green)) equalize_map[i].green=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].green-black.green))/(white.green-black.green))); if (((channel & BlueChannel) != 0) && (white.blue != black.blue)) equalize_map[i].blue=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].blue-black.blue))/(white.blue-black.blue))); if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity)) equalize_map[i].opacity=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].opacity-black.opacity))/(white.opacity-black.opacity))); if ((((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) && (white.index != black.index)) equalize_map[i].index=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].index-black.index))/(white.index-black.index))); } histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); map=(MagickPixelPacket *) RelinquishMagickMemory(map); if (image->storage_class == PseudoClass) { /* Equalize colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & SyncChannels) != 0) { if (white.red != black.red) { image->colormap[i].red=equalize_map[ ScaleQuantumToMap(image->colormap[i].red)].red; image->colormap[i].green=equalize_map[ ScaleQuantumToMap(image->colormap[i].green)].red; image->colormap[i].blue=equalize_map[ ScaleQuantumToMap(image->colormap[i].blue)].red; image->colormap[i].opacity=equalize_map[ ScaleQuantumToMap(image->colormap[i].opacity)].red; } continue; } if (((channel & RedChannel) != 0) && (white.red != black.red)) image->colormap[i].red=equalize_map[ ScaleQuantumToMap(image->colormap[i].red)].red; if (((channel & GreenChannel) != 0) && (white.green != black.green)) image->colormap[i].green=equalize_map[ ScaleQuantumToMap(image->colormap[i].green)].green; if (((channel & BlueChannel) != 0) && (white.blue != black.blue)) image->colormap[i].blue=equalize_map[ ScaleQuantumToMap(image->colormap[i].blue)].blue; if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity)) image->colormap[i].opacity=equalize_map[ ScaleQuantumToMap(image->colormap[i].opacity)].opacity; } } /* Equalize image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & SyncChannels) != 0) { if (white.red != black.red) { SetPixelRed(q,equalize_map[ ScaleQuantumToMap(GetPixelRed(q))].red); SetPixelGreen(q,equalize_map[ ScaleQuantumToMap(GetPixelGreen(q))].red); SetPixelBlue(q,equalize_map[ ScaleQuantumToMap(GetPixelBlue(q))].red); SetPixelOpacity(q,equalize_map[ ScaleQuantumToMap(GetPixelOpacity(q))].red); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,equalize_map[ ScaleQuantumToMap(GetPixelIndex(indexes+x))].red); } q++; continue; } if (((channel & RedChannel) != 0) && (white.red != black.red)) SetPixelRed(q,equalize_map[ ScaleQuantumToMap(GetPixelRed(q))].red); if (((channel & GreenChannel) != 0) && (white.green != black.green)) SetPixelGreen(q,equalize_map[ ScaleQuantumToMap(GetPixelGreen(q))].green); if (((channel & BlueChannel) != 0) && (white.blue != black.blue)) SetPixelBlue(q,equalize_map[ ScaleQuantumToMap(GetPixelBlue(q))].blue); if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity)) SetPixelOpacity(q,equalize_map[ ScaleQuantumToMap(GetPixelOpacity(q))].opacity); if ((((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) && (white.index != black.index)) SetPixelIndex(indexes+x,equalize_map[ ScaleQuantumToMap(GetPixelIndex(indexes+x))].index); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,EqualizeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory(equalize_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GammaImage() gamma-corrects a particular image channel. The same % image viewed on different devices will have perceptual differences in the % way the image's intensities are represented on the screen. Specify % individual gamma levels for the red, green, and blue channels, or adjust % all three with the gamma parameter. Values typically range from 0.8 to 2.3. % % You can also reduce the influence of a particular channel with a gamma % value of 0. % % The format of the GammaImage method is: % % MagickBooleanType GammaImage(Image *image,const char *level) % MagickBooleanType GammaImageChannel(Image *image, % const ChannelType channel,const double gamma) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o level: the image gamma as a string (e.g. 1.6,1.2,1.0). % % o gamma: the image gamma. 
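%
%  The correction built below is essentially the lookup table
%
%     gamma_map[i] = ScaleMapToQuantum(MaxMap*pow(i/MaxMap,1.0/gamma))
%
%  so gamma values above 1.0 brighten the mid-tones and values below 1.0
%  darken them; for example, with gamma 2.2 a mid-gray input of 0.5 maps to
%  roughly 0.73 of QuantumRange.
%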
% */ static inline double gamma_pow(const double value,const double gamma) { return(value < 0.0 ? value : pow(value,gamma)); } MagickExport MagickBooleanType GammaImage(Image *image,const char *level) { GeometryInfo geometry_info; MagickPixelPacket gamma; MagickStatusType flags, status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (level == (char *) NULL) return(MagickFalse); gamma.red=0.0; flags=ParseGeometry(level,&geometry_info); if ((flags & RhoValue) != 0) gamma.red=geometry_info.rho; gamma.green=gamma.red; if ((flags & SigmaValue) != 0) gamma.green=geometry_info.sigma; gamma.blue=gamma.red; if ((flags & XiValue) != 0) gamma.blue=geometry_info.xi; if ((gamma.red == 1.0) && (gamma.green == 1.0) && (gamma.blue == 1.0)) return(MagickTrue); if ((gamma.red == gamma.green) && (gamma.green == gamma.blue)) status=GammaImageChannel(image,(ChannelType) (RedChannel | GreenChannel | BlueChannel),(double) gamma.red); else { status=GammaImageChannel(image,RedChannel,(double) gamma.red); status&=GammaImageChannel(image,GreenChannel,(double) gamma.green); status&=GammaImageChannel(image,BlueChannel,(double) gamma.blue); } return(status != 0 ? MagickTrue : MagickFalse); } MagickExport MagickBooleanType GammaImageChannel(Image *image, const ChannelType channel,const double gamma) { #define GammaImageTag "Gamma/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; Quantum *gamma_map; ssize_t i; ssize_t y; /* Allocate and initialize gamma maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=(&image->exception); if (gamma == 1.0) return(MagickTrue); gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map)); if (gamma_map == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) memset(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map)); if (gamma != 0.0) for (i=0; i <= (ssize_t) MaxMap; i++) gamma_map[i]=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*pow((double) i/MaxMap, PerceptibleReciprocal(gamma))))); if (image->storage_class == PseudoClass) { /* Gamma-correct colormap. 
*/ for (i=0; i < (ssize_t) image->colors; i++) { #if !defined(MAGICKCORE_HDRI_SUPPORT) if ((channel & RedChannel) != 0) image->colormap[i].red=gamma_map[ScaleQuantumToMap( image->colormap[i].red)]; if ((channel & GreenChannel) != 0) image->colormap[i].green=gamma_map[ScaleQuantumToMap( image->colormap[i].green)]; if ((channel & BlueChannel) != 0) image->colormap[i].blue=gamma_map[ScaleQuantumToMap( image->colormap[i].blue)]; if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) image->colormap[i].opacity=gamma_map[ScaleQuantumToMap( image->colormap[i].opacity)]; else image->colormap[i].opacity=QuantumRange-gamma_map[ ScaleQuantumToMap((Quantum) (QuantumRange- image->colormap[i].opacity))]; } #else if ((channel & RedChannel) != 0) image->colormap[i].red=QuantumRange*gamma_pow(QuantumScale* image->colormap[i].red,PerceptibleReciprocal(gamma)); if ((channel & GreenChannel) != 0) image->colormap[i].green=QuantumRange*gamma_pow(QuantumScale* image->colormap[i].green,PerceptibleReciprocal(gamma)); if ((channel & BlueChannel) != 0) image->colormap[i].blue=QuantumRange*gamma_pow(QuantumScale* image->colormap[i].blue,PerceptibleReciprocal(gamma)); if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) image->colormap[i].opacity=QuantumRange*gamma_pow(QuantumScale* image->colormap[i].opacity,PerceptibleReciprocal(gamma)); else image->colormap[i].opacity=QuantumRange-QuantumRange*gamma_pow( QuantumScale*(QuantumRange-image->colormap[i].opacity),1.0/ gamma); } #endif } } /* Gamma-correct image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { #if !defined(MAGICKCORE_HDRI_SUPPORT) if ((channel & SyncChannels) != 0) { SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]); SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]); SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]); } else { if ((channel & RedChannel) != 0) SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]); if ((channel & GreenChannel) != 0) SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]); if ((channel & BlueChannel) != 0) SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]); if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) SetPixelOpacity(q,gamma_map[ScaleQuantumToMap( GetPixelOpacity(q))]); else SetPixelAlpha(q,gamma_map[ScaleQuantumToMap((Quantum) GetPixelAlpha(q))]); } } #else if ((channel & SyncChannels) != 0) { SetPixelRed(q,QuantumRange*gamma_pow(QuantumScale*GetPixelRed(q), PerceptibleReciprocal(gamma))); SetPixelGreen(q,QuantumRange*gamma_pow(QuantumScale*GetPixelGreen(q), PerceptibleReciprocal(gamma))); SetPixelBlue(q,QuantumRange*gamma_pow(QuantumScale*GetPixelBlue(q), PerceptibleReciprocal(gamma))); } else { if ((channel & RedChannel) != 0) SetPixelRed(q,QuantumRange*gamma_pow(QuantumScale*GetPixelRed(q), PerceptibleReciprocal(gamma))); if ((channel & GreenChannel) != 0) 
SetPixelGreen(q,QuantumRange*gamma_pow(QuantumScale* GetPixelGreen(q),PerceptibleReciprocal(gamma))); if ((channel & BlueChannel) != 0) SetPixelBlue(q,QuantumRange*gamma_pow(QuantumScale*GetPixelBlue(q), PerceptibleReciprocal(gamma))); if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) SetPixelOpacity(q,QuantumRange*gamma_pow(QuantumScale* GetPixelOpacity(q),PerceptibleReciprocal(gamma))); else SetPixelAlpha(q,QuantumRange*gamma_pow(QuantumScale* GetPixelAlpha(q),PerceptibleReciprocal(gamma))); } } #endif q++; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,gamma_map[ScaleQuantumToMap( GetPixelIndex(indexes+x))]); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,GammaImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map); if (image->gamma != 0.0) image->gamma*=gamma; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G r a y s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GrayscaleImage() converts the colors in the reference image to gray. % % The format of the GrayscaleImageChannel method is: % % MagickBooleanType GrayscaleImage(Image *image, % const PixelIntensityMethod method) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % */ MagickExport MagickBooleanType GrayscaleImage(Image *image, const PixelIntensityMethod method) { #define GrayscaleImageTag "Grayscale/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } /* Grayscale image. 
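    Each pixel is collapsed to a single intensity selected by the
    PixelIntensityMethod; the Rec601/Rec709 Luma variants weight
    gamma-encoded values, while the Luminance variants first decode to
    linear RGB.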
*/ /* call opencl version */ #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateGrayscaleImage(image,method,&image->exception) != MagickFalse) { image->intensity=method; image->type=GrayscaleType; if ((method == Rec601LuminancePixelIntensityMethod) || (method == Rec709LuminancePixelIntensityMethod)) return(SetImageColorspace(image,LinearGRAYColorspace)); return(SetImageColorspace(image,GRAYColorspace)); } #endif status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType blue, green, intensity, red; red=(MagickRealType) q->red; green=(MagickRealType) q->green; blue=(MagickRealType) q->blue; intensity=0.0; switch (method) { case AveragePixelIntensityMethod: { intensity=(red+green+blue)/3.0; break; } case BrightnessPixelIntensityMethod: { intensity=MagickMax(MagickMax(red,green),blue); break; } case LightnessPixelIntensityMethod: { intensity=(MagickMin(MagickMin(red,green),blue)+ MagickMax(MagickMax(red,green),blue))/2.0; break; } case MSPixelIntensityMethod: { intensity=(MagickRealType) (((double) red*red+green*green+ blue*blue)/(3.0*QuantumRange)); break; } case Rec601LumaPixelIntensityMethod: { if (image->colorspace == RGBColorspace) { red=EncodePixelGamma(red); green=EncodePixelGamma(green); blue=EncodePixelGamma(blue); } intensity=0.298839*red+0.586811*green+0.114350*blue; break; } case Rec601LuminancePixelIntensityMethod: { if (image->colorspace == sRGBColorspace) { red=DecodePixelGamma(red); green=DecodePixelGamma(green); blue=DecodePixelGamma(blue); } intensity=0.298839*red+0.586811*green+0.114350*blue; break; } case Rec709LumaPixelIntensityMethod: default: { if (image->colorspace == RGBColorspace) { red=EncodePixelGamma(red); green=EncodePixelGamma(green); blue=EncodePixelGamma(blue); } intensity=0.212656*red+0.715158*green+0.072186*blue; break; } case Rec709LuminancePixelIntensityMethod: { if (image->colorspace == sRGBColorspace) { red=DecodePixelGamma(red); green=DecodePixelGamma(green); blue=DecodePixelGamma(blue); } intensity=0.212656*red+0.715158*green+0.072186*blue; break; } case RMSPixelIntensityMethod: { intensity=(MagickRealType) (sqrt((double) red*red+green*green+ blue*blue)/sqrt(3.0)); break; } } SetPixelGray(q,ClampToQuantum(intensity)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,GrayscaleImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); image->intensity=method; image->type=GrayscaleType; if ((method == Rec601LuminancePixelIntensityMethod) || (method == Rec709LuminancePixelIntensityMethod)) return(SetImageColorspace(image,LinearGRAYColorspace)); return(SetImageColorspace(image,GRAYColorspace)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % H a l d C l u t I m a 
g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % HaldClutImage() applies a Hald color lookup table to the image. A Hald % color lookup table is a 3-dimensional color cube mapped to 2 dimensions. % Create it with the HALD coder. You can apply any color transformation to % the Hald image and then use this method to apply the transform to the % image. % % The format of the HaldClutImage method is: % % MagickBooleanType HaldClutImage(Image *image,Image *hald_image) % MagickBooleanType HaldClutImageChannel(Image *image, % const ChannelType channel,Image *hald_image) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o hald_image: the color lookup table image for replacement color values. % % o channel: the channel. % */ MagickExport MagickBooleanType HaldClutImage(Image *image, const Image *hald_image) { return(HaldClutImageChannel(image,DefaultChannels,hald_image)); } MagickExport MagickBooleanType HaldClutImageChannel(Image *image, const ChannelType channel,const Image *hald_image) { #define HaldClutImageTag "Clut/Image" typedef struct _HaldInfo { MagickRealType x, y, z; } HaldInfo; CacheView *hald_view, *image_view; double width; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; size_t cube_size, length, level; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(hald_image != (Image *) NULL); assert(hald_image->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); /* Hald clut image. 
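    The level and cube_size computed below describe the color cube encoded in
    the 2-D Hald image; each pixel's scaled RGB selects a cell, two
    interpolated samples are blended along the green axis, and the pair one
    cube_size away is blended along the blue axis to form the replacement
    color.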
*/ status=MagickTrue; progress=0; length=(size_t) MagickMin((MagickRealType) hald_image->columns, (MagickRealType) hald_image->rows); for (level=2; (level*level*level) < length; level++) ; level*=level; cube_size=level*level; width=(double) hald_image->columns; GetMagickPixelPacket(hald_image,&zero); exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); hald_view=AcquireAuthenticCacheView(hald_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,hald_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double area, offset; HaldInfo point; MagickPixelPacket pixel, pixel1, pixel2, pixel3, pixel4; IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(hald_view); pixel=zero; pixel1=zero; pixel2=zero; pixel3=zero; pixel4=zero; for (x=0; x < (ssize_t) image->columns; x++) { point.x=QuantumScale*(level-1.0)*GetPixelRed(q); point.y=QuantumScale*(level-1.0)*GetPixelGreen(q); point.z=QuantumScale*(level-1.0)*GetPixelBlue(q); offset=(double) (point.x+level*floor(point.y)+cube_size*floor(point.z)); point.x-=floor(point.x); point.y-=floor(point.y); point.z-=floor(point.z); status=InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width), &pixel1,exception); if (status == MagickFalse) break; status=InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/ width),&pixel2,exception); if (status == MagickFalse) break; area=point.y; if (hald_image->interpolate == NearestNeighborInterpolatePixel) area=(point.y < 0.5) ? 0.0 : 1.0; MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2, pixel2.opacity,area,&pixel3); offset+=cube_size; status=InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width), &pixel1,exception); if (status == MagickFalse) break; status=InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/ width),&pixel2,exception); if (status == MagickFalse) break; MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2, pixel2.opacity,area,&pixel4); area=point.z; if (hald_image->interpolate == NearestNeighborInterpolatePixel) area=(point.z < 0.5)? 
0.0 : 1.0; MagickPixelCompositeAreaBlend(&pixel3,pixel3.opacity,&pixel4, pixel4.opacity,area,&pixel); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(pixel.blue)); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(pixel.index)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,HaldClutImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } hald_view=DestroyCacheView(hald_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImage() adjusts the levels of a particular image channel by % scaling the colors falling between specified white and black points to % the full available quantum range. % % The parameters provided represent the black, and white points. The black % point specifies the darkest color in the image. Colors darker than the % black point are set to zero. White point specifies the lightest color in % the image. Colors brighter than the white point are set to the maximum % quantum value. % % If a '!' flag is given, map black and white colors to the given levels % rather than mapping those levels to black and white. See % LevelizeImageChannel() and LevelizeImageChannel(), below. % % Gamma specifies a gamma correction to apply to the image. % % The format of the LevelImage method is: % % MagickBooleanType LevelImage(Image *image,const char *levels) % % A description of each parameter follows: % % o image: the image. % % o levels: Specify the levels where the black and white points have the % range of 0-QuantumRange, and gamma has the range 0-10 (e.g. 10x90%+2). % A '!' flag inverts the re-mapping. % */ MagickExport MagickBooleanType LevelImage(Image *image,const char *levels) { double black_point = 0.0, gamma = 1.0, white_point = (double) QuantumRange; GeometryInfo geometry_info; MagickBooleanType status; MagickStatusType flags; /* Parse levels. 
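    rho, sigma, and xi of the geometry string become the black point, white
    point, and gamma; a percent flag rescales the points, and the '!'
    (AspectValue) flag routes to LevelizeImage() rather than
    LevelImageChannel().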
*/ if (levels == (char *) NULL) return(MagickFalse); flags=ParseGeometry(levels,&geometry_info); if ((flags & RhoValue) != 0) black_point=geometry_info.rho; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; if ((flags & XiValue) != 0) gamma=geometry_info.xi; if ((flags & PercentValue) != 0) { black_point*=(double) image->columns*image->rows/100.0; white_point*=(double) image->columns*image->rows/100.0; } if ((flags & SigmaValue) == 0) white_point=(double) QuantumRange-black_point; if ((flags & AspectValue ) == 0) status=LevelImageChannel(image,DefaultChannels,black_point,white_point, gamma); else status=LevelizeImage(image,black_point,white_point,gamma); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImage() applies the normal level operation to the image, spreading % out the values between the black and white points over the entire range of % values. Gamma correction is also applied after the values has been mapped. % % It is typically used to improve image contrast, or to provide a controlled % linear threshold for the image. If the black and white points are set to % the minimum and maximum values found in the image, the image can be % normalized. or by swapping black and white values, negate the image. % % The format of the LevelImage method is: % % MagickBooleanType LevelImage(Image *image,const double black_point, % const double white_point,const double gamma) % MagickBooleanType LevelImageChannel(Image *image, % const ChannelType channel,const double black_point, % const double white_point,const double gamma) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o black_point: The level which is to be mapped to zero (black) % % o white_point: The level which is to be mapped to QuantumRange (white) % % o gamma: adjust gamma by this factor before mapping values. % use 1.0 for purely linear stretching of image color values % */ static inline double LevelPixel(const double black_point, const double white_point,const double gamma,const MagickRealType pixel) { double level_pixel, scale; scale=PerceptibleReciprocal(white_point-black_point); level_pixel=QuantumRange*gamma_pow(scale*((double) pixel-black_point), PerceptibleReciprocal(gamma)); return(level_pixel); } MagickExport MagickBooleanType LevelImageChannel(Image *image, const ChannelType channel,const double black_point,const double white_point, const double gamma) { #define LevelImageTag "Level/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; ssize_t i; ssize_t y; /* Allocate and initialize levels map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Level colormap. 
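    Each selected colormap channel is passed through LevelPixel(), which
    stretches the [black_point,white_point] interval over the full quantum
    range and then raises the normalized result to the 1/gamma power.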
*/ if ((channel & RedChannel) != 0) image->colormap[i].red=(Quantum) ClampToQuantum(LevelPixel(black_point, white_point,gamma,(MagickRealType) image->colormap[i].red)); if ((channel & GreenChannel) != 0) image->colormap[i].green=(Quantum) ClampToQuantum(LevelPixel( black_point,white_point,gamma,(MagickRealType) image->colormap[i].green)); if ((channel & BlueChannel) != 0) image->colormap[i].blue=(Quantum) ClampToQuantum(LevelPixel(black_point, white_point,gamma,(MagickRealType) image->colormap[i].blue)); if ((channel & OpacityChannel) != 0) image->colormap[i].opacity=(Quantum) (QuantumRange-(Quantum) ClampToQuantum(LevelPixel(black_point,white_point,gamma, (MagickRealType) (QuantumRange-image->colormap[i].opacity)))); } /* Level image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma, (MagickRealType) GetPixelRed(q)))); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma, (MagickRealType) GetPixelGreen(q)))); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma, (MagickRealType) GetPixelBlue(q)))); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelAlpha(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma, (MagickRealType) GetPixelAlpha(q)))); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(LevelPixel(black_point, white_point,gamma,(MagickRealType) GetPixelIndex(indexes+x)))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,LevelImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); (void) ClampImage(image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l i z e I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelizeImageChannel() applies the reversed LevelImage() operation to just % the specific channels specified. It compresses the full range of color % values, so that they lie between the given black and white points. Gamma is % applied before the values are mapped. % % LevelizeImageChannel() can be called with by using a +level command line % API option, or using a '!' on a -level or LevelImage() geometry string. % % It can be used for example de-contrast a greyscale image to the exact % levels specified. 
Or by using specific levels for each channel of an image % you can convert a gray-scale image to any linear color gradient, according % to those levels. % % The format of the LevelizeImageChannel method is: % % MagickBooleanType LevelizeImageChannel(Image *image, % const ChannelType channel,const char *levels) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o black_point: The level to map zero (black) to. % % o white_point: The level to map QuantumRange (white) to. % % o gamma: adjust gamma by this factor before mapping values. % */ MagickExport MagickBooleanType LevelizeImage(Image *image, const double black_point,const double white_point,const double gamma) { MagickBooleanType status; status=LevelizeImageChannel(image,DefaultChannels,black_point,white_point, gamma); return(status); } MagickExport MagickBooleanType LevelizeImageChannel(Image *image, const ChannelType channel,const double black_point,const double white_point, const double gamma) { #define LevelizeImageTag "Levelize/Image" #define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \ (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point) CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; ssize_t i; ssize_t y; /* Allocate and initialize levels map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Level colormap. */ if ((channel & RedChannel) != 0) image->colormap[i].red=LevelizeValue(image->colormap[i].red); if ((channel & GreenChannel) != 0) image->colormap[i].green=LevelizeValue(image->colormap[i].green); if ((channel & BlueChannel) != 0) image->colormap[i].blue=LevelizeValue(image->colormap[i].blue); if ((channel & OpacityChannel) != 0) image->colormap[i].opacity=(Quantum) (QuantumRange-LevelizeValue( QuantumRange-image->colormap[i].opacity)); } /* Level image. 
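    LevelizeValue() performs the reverse mapping: the normalized channel value
    is raised to the gamma power and then compressed into the
    [black_point,white_point] interval.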
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,LevelizeValue(GetPixelRed(q))); if ((channel & GreenChannel) != 0) SetPixelGreen(q,LevelizeValue(GetPixelGreen(q))); if ((channel & BlueChannel) != 0) SetPixelBlue(q,LevelizeValue(GetPixelBlue(q))); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelAlpha(q,LevelizeValue(GetPixelAlpha(q))); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,LevelizeValue(GetPixelIndex(indexes+x))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,LevelizeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImageColor() maps the given color to "black" and "white" values, % linearly spreading out the colors, and level values on a channel by channel % bases, as per LevelImage(). The given colors allows you to specify % different level ranges for each of the color channels separately. % % If the boolean 'invert' is set true the image values will modifyed in the % reverse direction. That is any existing "black" and "white" colors in the % image will become the color values given, with all other values compressed % appropriatally. This effectivally maps a greyscale gradient into the given % color gradient. % % The format of the LevelColorsImageChannel method is: % % MagickBooleanType LevelColorsImage(Image *image, % const MagickPixelPacket *black_color, % const MagickPixelPacket *white_color,const MagickBooleanType invert) % MagickBooleanType LevelColorsImageChannel(Image *image, % const ChannelType channel,const MagickPixelPacket *black_color, % const MagickPixelPacket *white_color,const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. 
% % o black_color: The color to map black to/from % % o white_point: The color to map white to/from % % o invert: if true map the colors (levelize), rather than from (level) % */ MagickExport MagickBooleanType LevelColorsImage(Image *image, const MagickPixelPacket *black_color,const MagickPixelPacket *white_color, const MagickBooleanType invert) { MagickBooleanType status; status=LevelColorsImageChannel(image,DefaultChannels,black_color,white_color, invert); return(status); } MagickExport MagickBooleanType LevelColorsImageChannel(Image *image, const ChannelType channel,const MagickPixelPacket *black_color, const MagickPixelPacket *white_color,const MagickBooleanType invert) { MagickStatusType status; /* Allocate and initialize levels map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsGrayColorspace(black_color->colorspace) != MagickFalse) || (IsGrayColorspace(white_color->colorspace) != MagickFalse))) (void) SetImageColorspace(image,sRGBColorspace); status=MagickTrue; if (invert == MagickFalse) { if ((channel & RedChannel) != 0) status&=LevelImageChannel(image,RedChannel,black_color->red, white_color->red,(double) 1.0); if ((channel & GreenChannel) != 0) status&=LevelImageChannel(image,GreenChannel,black_color->green, white_color->green,(double) 1.0); if ((channel & BlueChannel) != 0) status&=LevelImageChannel(image,BlueChannel,black_color->blue, white_color->blue,(double) 1.0); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) status&=LevelImageChannel(image,OpacityChannel,black_color->opacity, white_color->opacity,(double) 1.0); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) status&=LevelImageChannel(image,IndexChannel,black_color->index, white_color->index,(double) 1.0); } else { if ((channel & RedChannel) != 0) status&=LevelizeImageChannel(image,RedChannel,black_color->red, white_color->red,(double) 1.0); if ((channel & GreenChannel) != 0) status&=LevelizeImageChannel(image,GreenChannel,black_color->green, white_color->green,(double) 1.0); if ((channel & BlueChannel) != 0) status&=LevelizeImageChannel(image,BlueChannel,black_color->blue, white_color->blue,(double) 1.0); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) status&=LevelizeImageChannel(image,OpacityChannel,black_color->opacity, white_color->opacity,(double) 1.0); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) status&=LevelizeImageChannel(image,IndexChannel,black_color->index, white_color->index,(double) 1.0); } return(status == 0 ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i n e a r S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LinearStretchImage() discards any pixels below the black point and above % the white point and levels the remaining pixels. % % The format of the LinearStretchImage method is: % % MagickBooleanType LinearStretchImage(Image *image, % const double black_point,const double white_point) % % A description of each parameter follows: % % o image: the image. % % o black_point: the black point. % % o white_point: the white point. 
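%
%  A usage sketch (illustrative; the pixel-count thresholds below, roughly the
%  darkest 2% and brightest 1% of pixels, are arbitrary examples):
%
%    black_point=0.02*image->columns*image->rows;
%    white_point=0.01*image->columns*image->rows;
%    status=LinearStretchImage(image,black_point,white_point);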
% */ MagickExport MagickBooleanType LinearStretchImage(Image *image, const double black_point,const double white_point) { #define LinearStretchImageTag "LinearStretch/Image" ExceptionInfo *exception; MagickBooleanType status; MagickRealType *histogram, intensity; ssize_t black, white, y; /* Allocate histogram and linear map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); exception=(&image->exception); histogram=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram)); if (histogram == (MagickRealType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Form histogram. */ (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram)); for (y=0; y < (ssize_t) image->rows; y++) { const PixelPacket *magick_restrict p; ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=(ssize_t) image->columns-1; x >= 0; x--) { histogram[ScaleQuantumToMap(ClampToQuantum(GetPixelIntensity(image,p)))]++; p++; } } /* Find the histogram boundaries by locating the black and white point levels. */ intensity=0.0; for (black=0; black < (ssize_t) MaxMap; black++) { intensity+=histogram[black]; if (intensity >= black_point) break; } intensity=0.0; for (white=(ssize_t) MaxMap; white != 0; white--) { intensity+=histogram[white]; if (intensity >= white_point) break; } histogram=(MagickRealType *) RelinquishMagickMemory(histogram); status=LevelImageChannel(image,DefaultChannels,(double) ScaleMapToQuantum(black),(double) ScaleMapToQuantum(white),1.0); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o d u l a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModulateImage() lets you control the brightness, saturation, and hue % of an image. Modulate represents the brightness, saturation, and hue % as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the % modulation is lightness, saturation, and hue. For HWB, use blackness, % whiteness, and hue. And for HCL, use chrome, luma, and hue. % % The format of the ModulateImage method is: % % MagickBooleanType ModulateImage(Image *image,const char *modulate) % % A description of each parameter follows: % % o image: the image. % % o modulate: Define the percent change in brightness, saturation, and % hue. % */ static inline void ModulateHCL(const double percent_hue, const double percent_chroma,const double percent_luma,Quantum *red, Quantum *green,Quantum *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma); hue+=fmod((percent_hue-100.0),200.0)/200.0; chroma*=0.01*percent_chroma; luma*=0.01*percent_luma; ConvertHCLToRGB(hue,chroma,luma,red,green,blue); } static inline void ModulateHCLp(const double percent_hue, const double percent_chroma,const double percent_luma,Quantum *red, Quantum *green,Quantum *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. 
*/ ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma); hue+=fmod((percent_hue-100.0),200.0)/200.0; chroma*=0.01*percent_chroma; luma*=0.01*percent_luma; ConvertHCLpToRGB(hue,chroma,luma,red,green,blue); } static inline void ModulateHSB(const double percent_hue, const double percent_saturation,const double percent_brightness,Quantum *red, Quantum *green,Quantum *blue) { double brightness, hue, saturation; /* Increase or decrease color brightness, saturation, or hue. */ ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; brightness*=0.01*percent_brightness; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } static inline void ModulateHSI(const double percent_hue, const double percent_saturation,const double percent_intensity,Quantum *red, Quantum *green,Quantum *blue) { double intensity, hue, saturation; /* Increase or decrease color intensity, saturation, or hue. */ ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; intensity*=0.01*percent_intensity; ConvertHSIToRGB(hue,saturation,intensity,red,green,blue); } static inline void ModulateHSL(const double percent_hue, const double percent_saturation,const double percent_lightness,Quantum *red, Quantum *green,Quantum *blue) { double hue, lightness, saturation; /* Increase or decrease color lightness, saturation, or hue. */ ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; lightness*=0.01*percent_lightness; ConvertHSLToRGB(hue,saturation,lightness,red,green,blue); } static inline void ModulateHSV(const double percent_hue, const double percent_saturation,const double percent_value,Quantum *red, Quantum *green,Quantum *blue) { double hue, saturation, value; /* Increase or decrease color value, saturation, or hue. */ ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; value*=0.01*percent_value; ConvertHSVToRGB(hue,saturation,value,red,green,blue); } static inline void ModulateHWB(const double percent_hue, const double percent_whiteness,const double percent_blackness,Quantum *red, Quantum *green,Quantum *blue) { double blackness, hue, whiteness; /* Increase or decrease color blackness, whiteness, or hue. */ ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness); hue+=fmod((percent_hue-100.0),200.0)/200.0; blackness*=0.01*percent_blackness; whiteness*=0.01*percent_whiteness; ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue); } static inline void ModulateLCHab(const double percent_luma, const double percent_chroma,const double percent_hue,Quantum *red, Quantum *green,Quantum *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=fmod((percent_hue-100.0),200.0)/200.0; ConvertLCHabToRGB(luma,chroma,hue,red,green,blue); } static inline void ModulateLCHuv(const double percent_luma, const double percent_chroma,const double percent_hue,Quantum *red, Quantum *green,Quantum *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. 
*/ ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=fmod((percent_hue-100.0),200.0)/200.0; ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue); } MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate) { #define ModulateImageTag "Modulate/Image" CacheView *image_view; ColorspaceType colorspace; const char *artifact; double percent_brightness = 100.0, percent_hue = 100.0, percent_saturation = 100.0; ExceptionInfo *exception; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; ssize_t i; ssize_t y; /* Initialize modulate table. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (modulate == (char *) NULL) return(MagickFalse); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); flags=ParseGeometry(modulate,&geometry_info); if ((flags & RhoValue) != 0) percent_brightness=geometry_info.rho; if ((flags & SigmaValue) != 0) percent_saturation=geometry_info.sigma; if ((flags & XiValue) != 0) percent_hue=geometry_info.xi; colorspace=UndefinedColorspace; artifact=GetImageArtifact(image,"modulate:colorspace"); if (artifact != (const char *) NULL) colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions, MagickFalse,artifact); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { Quantum blue, green, red; /* Modulate image colormap. */ red=image->colormap[i].red; green=image->colormap[i].green; blue=image->colormap[i].blue; switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSIColorspace: { ModulateHSI(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHabColorspace: case LCHColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } image->colormap[i].red=red; image->colormap[i].green=green; image->colormap[i].blue=blue; } /* Modulate image. 
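    Each pixel is converted to the requested cylindrical colorspace, its
    brightness and saturation components are scaled and its hue rotated by the
    given percentages, and the result is converted back to RGB; HSL is used
    when no modulate:colorspace artifact is set.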
*/ /* call opencl version */ #if defined(MAGICKCORE_OPENCL_SUPPORT) status=AccelerateModulateImage(image,percent_brightness,percent_hue, percent_saturation,colorspace,&image->exception); if (status != MagickFalse) return status; #endif status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { Quantum blue, green, red; red=GetPixelRed(q); green=GetPixelGreen(q); blue=GetPixelBlue(q); switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHabColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHColorspace: case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ModulateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e g a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NegateImage() negates the colors in the reference image. The grayscale % option means that only grayscale values within the image are negated. % % The format of the NegateImageChannel method is: % % MagickBooleanType NegateImage(Image *image, % const MagickBooleanType grayscale) % MagickBooleanType NegateImageChannel(Image *image, % const ChannelType channel,const MagickBooleanType grayscale) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o grayscale: If MagickTrue, only negate grayscale pixels within the image. 
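%
%  A usage sketch (illustrative only): negate every channel of every pixel,
%  then negate just the green channel of the grayscale pixels:
%
%    status=NegateImage(image,MagickFalse);
%    status=NegateImageChannel(image,GreenChannel,MagickTrue);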
% */ MagickExport MagickBooleanType NegateImage(Image *image, const MagickBooleanType grayscale) { MagickBooleanType status; status=NegateImageChannel(image,DefaultChannels,grayscale); return(status); } MagickExport MagickBooleanType NegateImageChannel(Image *image, const ChannelType channel,const MagickBooleanType grayscale) { #define NegateImageTag "Negate/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { /* Negate colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { if (grayscale != MagickFalse) if ((image->colormap[i].red != image->colormap[i].green) || (image->colormap[i].green != image->colormap[i].blue)) continue; if ((channel & RedChannel) != 0) image->colormap[i].red=QuantumRange-image->colormap[i].red; if ((channel & GreenChannel) != 0) image->colormap[i].green=QuantumRange-image->colormap[i].green; if ((channel & BlueChannel) != 0) image->colormap[i].blue=QuantumRange-image->colormap[i].blue; } } /* Negate image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); if (grayscale != MagickFalse) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((GetPixelRed(q) != GetPixelGreen(q)) || (GetPixelGreen(q) != GetPixelBlue(q))) { q++; continue; } if ((channel & RedChannel) != 0) SetPixelRed(q,QuantumRange-GetPixelRed(q)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,QuantumRange-GetPixelGreen(q)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,QuantumRange-GetPixelBlue(q)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,QuantumRange-GetPixelOpacity(q)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,NegateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(MagickTrue); } /* Negate image. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); if (channel == DefaultChannels) for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q+x,QuantumRange-GetPixelRed(q+x)); SetPixelGreen(q+x,QuantumRange-GetPixelGreen(q+x)); SetPixelBlue(q+x,QuantumRange-GetPixelBlue(q+x)); } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q+x,QuantumRange-GetPixelRed(q+x)); if ((channel & GreenChannel) != 0) SetPixelGreen(q+x,QuantumRange-GetPixelGreen(q+x)); if ((channel & BlueChannel) != 0) SetPixelBlue(q+x,QuantumRange-GetPixelBlue(q+x)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q+x,QuantumRange-GetPixelOpacity(q+x)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x)); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,NegateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N o r m a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The NormalizeImage() method enhances the contrast of a color image by % mapping the darkest 2 percent of all pixel to black and the brightest % 1 percent to white. % % The format of the NormalizeImage method is: % % MagickBooleanType NormalizeImage(Image *image) % MagickBooleanType NormalizeImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % */ MagickExport MagickBooleanType NormalizeImage(Image *image) { MagickBooleanType status; status=NormalizeImageChannel(image,DefaultChannels); return(status); } MagickExport MagickBooleanType NormalizeImageChannel(Image *image, const ChannelType channel) { double black_point, white_point; black_point=(double) image->columns*image->rows*0.0015; white_point=(double) image->columns*image->rows*0.9995; return(ContrastStretchImageChannel(image,channel,black_point,white_point)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S i g m o i d a l C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SigmoidalContrastImage() adjusts the contrast of an image with a non-linear % sigmoidal contrast algorithm. Increase the contrast of the image using a % sigmoidal transfer function without saturating highlights or shadows. 
% Contrast indicates how much to increase the contrast (0 is none; 3 is % typical; 20 is pushing it); mid-point indicates where midtones fall in the % resultant image (0 is white; 50% is middle-gray; 100% is black). Set % sharpen to MagickTrue to increase the image contrast otherwise the contrast % is reduced. % % The format of the SigmoidalContrastImage method is: % % MagickBooleanType SigmoidalContrastImage(Image *image, % const MagickBooleanType sharpen,const char *levels) % MagickBooleanType SigmoidalContrastImageChannel(Image *image, % const ChannelType channel,const MagickBooleanType sharpen, % const double contrast,const double midpoint) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o sharpen: Increase or decrease image contrast. % % o contrast: strength of the contrast, the larger the number the more % 'threshold-like' it becomes. % % o midpoint: midpoint of the function as a color value 0 to QuantumRange. % */ /* ImageMagick 7 has a version of this function which does not use LUTs. */ /* Sigmoidal function Sigmoidal with inflexion point moved to b and "slope constant" set to a. The first version, based on the hyperbolic tangent tanh, when combined with the scaling step, is an exact arithmetic clone of the sigmoid function based on the logistic curve. The equivalence is based on the identity 1/(1+exp(-t)) = (1+tanh(t/2))/2 (http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the scaled sigmoidal derivation is invariant under affine transformations of the ordinate. The tanh version is almost certainly more accurate and cheaper. The 0.5 factor in the argument is to clone the legacy ImageMagick behavior. The reason for making the define depend on atanh even though it only uses tanh has to do with the construction of the inverse of the scaled sigmoidal. */ #if defined(MAGICKCORE_HAVE_ATANH) #define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) ) #else #define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) ) #endif /* Scaled sigmoidal function: ( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) / ( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) ) See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by zero. This is fixed below by exiting immediately when contrast is small, leaving the image (or colormap) unmodified. This appears to be safe because the series expansion of the logistic sigmoidal function around x=b is 1/2-a*(b-x)/4+... so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh). */ #define ScaledSigmoidal(a,b,x) ( \ (Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \ (Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) ) /* Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even when creating a LUT from in gamut values, hence the branching. In addition, HDRI may have out of gamut values. InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal: It is only a right inverse. This is unavoidable. */ static inline double InverseScaledSigmoidal(const double a,const double b, const double x) { const double sig0=Sigmoidal(a,b,0.0); const double sig1=Sigmoidal(a,b,1.0); const double argument=(sig1-sig0)*x+sig0; const double clamped= ( #if defined(MAGICKCORE_HAVE_ATANH) argument < -1+MagickEpsilon ? 
-1+MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b+(2.0/a)*atanh(clamped)); #else argument < MagickEpsilon ? MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b-log(1.0/clamped-1.0)/a); #endif } MagickExport MagickBooleanType SigmoidalContrastImage(Image *image, const MagickBooleanType sharpen,const char *levels) { GeometryInfo geometry_info; MagickBooleanType status; MagickStatusType flags; flags=ParseGeometry(levels,&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0*QuantumRange/2.0; if ((flags & PercentValue) != 0) geometry_info.sigma=1.0*QuantumRange*geometry_info.sigma/100.0; status=SigmoidalContrastImageChannel(image,DefaultChannels,sharpen, geometry_info.rho,geometry_info.sigma); return(status); } MagickExport MagickBooleanType SigmoidalContrastImageChannel(Image *image, const ChannelType channel,const MagickBooleanType sharpen, const double contrast,const double midpoint) { #define SigmoidalContrastImageTag "SigmoidalContrast/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickRealType *sigmoidal_map; ssize_t i; ssize_t y; /* Side effect: clamps values unless contrast<MagickEpsilon, in which case nothing is done. */ if (contrast < MagickEpsilon) return(MagickTrue); /* Allocate and initialize sigmoidal maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=(&image->exception); sigmoidal_map=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*sigmoidal_map)); if (sigmoidal_map == (MagickRealType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) memset(sigmoidal_map,0,(MaxMap+1)*sizeof(*sigmoidal_map)); if (sharpen != MagickFalse) for (i=0; i <= (ssize_t) MaxMap; i++) sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType) (MaxMap*ScaledSigmoidal(contrast,QuantumScale*midpoint,(double) i/ MaxMap))); else for (i=0; i <= (ssize_t) MaxMap; i++) sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType) ( MaxMap*InverseScaledSigmoidal(contrast,QuantumScale*midpoint,(double) i/ MaxMap))); /* Sigmoidal-contrast enhance colormap. */ if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) image->colormap[i].red=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].red)]); if ((channel & GreenChannel) != 0) image->colormap[i].green=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].green)]); if ((channel & BlueChannel) != 0) image->colormap[i].blue=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].blue)]); if ((channel & OpacityChannel) != 0) image->colormap[i].opacity=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].opacity)]); } /* Sigmoidal-contrast enhance image. 
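    Each selected channel is looked up in the sigmoidal_map LUT built above
    from ScaledSigmoidal() (sharpen) or InverseScaledSigmoidal() (its right
    inverse, used to reverse the adjustment).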
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelRed(q))])); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelGreen(q))])); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelBlue(q))])); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelOpacity(q))])); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelIndex(indexes+x))])); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); sigmoidal_map=(MagickRealType *) RelinquishMagickMemory(sigmoidal_map); return(status); }
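/*
  Illustrative sketch (not part of MagickCore): a tiny standalone program that
  tabulates the same tanh-based ScaledSigmoidal curve described above for a
  chosen contrast and midpoint.  Function and variable names are hypothetical.
*/
#include <math.h>
#include <stdio.h>

static double scaled_sigmoidal(const double a,const double b,const double x)
{
  /* (Sigmoidal(x)-Sigmoidal(0))/(Sigmoidal(1)-Sigmoidal(0)), tanh form */
  const double s0=tanh(0.5*a*(0.0-b));
  const double s1=tanh(0.5*a*(1.0-b));
  return((tanh(0.5*a*(x-b))-s0)/(s1-s0));
}

int main(void)
{
  const double contrast=3.0;   /* "typical" per the documentation above */
  const double midpoint=0.5;   /* midtones fall at middle gray */
  int i;

  for (i=0; i <= 10; i++)
    printf("%4.2f -> %6.4f\n",i/10.0,scaled_sigmoidal(contrast,midpoint,i/10.0));
  return(0);
}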
target_array_extension.c
// -------------------------------------------------- // Check extends before // -------------------------------------------------- // RUN: %libomptarget-compile-aarch64-unknown-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=BEFORE // RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \ // RUN: | %fcheck-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=BEFORE // RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=BEFORE // RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-x86_64-pc-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=BEFORE // RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \ // RUN: | %fcheck-x86_64-pc-linux-gnu // -------------------------------------------------- // Check extends after // -------------------------------------------------- // RUN: %libomptarget-compile-aarch64-unknown-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=AFTER // RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \ // RUN: | %fcheck-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=AFTER // RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=AFTER // RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-x86_64-pc-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=AFTER // RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \ // RUN: | %fcheck-x86_64-pc-linux-gnu // END. 
#include <stdio.h> #define BEFORE 0 #define AFTER 1 #define SIZE 100 #if EXTENDS == BEFORE # define SMALL_BEG (SIZE-2) # define SMALL_END SIZE # define LARGE_BEG 0 # define LARGE_END SIZE #elif EXTENDS == AFTER # define SMALL_BEG 0 # define SMALL_END 2 # define LARGE_BEG 0 # define LARGE_END SIZE #else # error EXTENDS undefined #endif #define SMALL_SIZE (SMALL_END-SMALL_BEG) #define LARGE_SIZE (LARGE_END-LARGE_BEG) #define SMALL SMALL_BEG:SMALL_SIZE #define LARGE LARGE_BEG:LARGE_SIZE int main() { int arr[SIZE]; // CHECK: addr=0x[[#%x,SMALL_ADDR:]], size=[[#%u,SMALL_BYTES:]] fprintf(stderr, "addr=%p, size=%ld\n", &arr[SMALL_BEG], SMALL_SIZE * sizeof arr[0]); // CHECK: addr=0x[[#%x,LARGE_ADDR:]], size=[[#%u,LARGE_BYTES:]] fprintf(stderr, "addr=%p, size=%ld\n", &arr[LARGE_BEG], LARGE_SIZE * sizeof arr[0]); // CHECK-NOT: Libomptarget #pragma omp target data map(alloc: arr[LARGE]) { #pragma omp target map(present, tofrom: arr[SMALL]) ; } // CHECK: arr is present fprintf(stderr, "arr is present\n"); // CHECK: Libomptarget message: explicit extension not allowed: host address specified is 0x{{0*}}[[#LARGE_ADDR]] ([[#LARGE_BYTES]] bytes), but device allocation maps to host at 0x{{0*}}[[#SMALL_ADDR]] ([[#SMALL_BYTES]] bytes) // CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#LARGE_ADDR]] ([[#LARGE_BYTES]] bytes) // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory #pragma omp target data map(alloc: arr[SMALL]) { #pragma omp target map(present, tofrom: arr[LARGE]) ; } // CHECK-NOT: arr is present fprintf(stderr, "arr is present\n"); return 0; }
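// Illustrative sketch (not part of the test above): the well-formed pattern is
// for the enclosing map to cover at least the extent of any later 'present'
// map, as the first "target data" region above already demonstrates.  The
// function name below is hypothetical.
static void valid_containment(void) {
  int arr[100];
#pragma omp target data map(alloc: arr[0:100]) // whole array allocated on the device
  {
    // The subsection lies inside the existing allocation, so 'present' is satisfied.
#pragma omp target map(present, tofrom: arr[98:2])
    ;
  }
}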
enhance.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE N N H H AAA N N CCCC EEEEE % % E NN N H H A A NN N C E % % EEE N N N HHHHH AAAAA N N N C EEE % % E N NN H H A A N NN C E % % EEEEE N N H H A A N N CCCC EEEEE % % % % % % MagickCore Image Enhancement Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/composite-private.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/xml-tree.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoGammaImage() extract the 'mean' from the image and adjust the image % to try make set its gamma appropriatally. % % The format of the AutoGammaImage method is: % % MagickBooleanType AutoGammaImage(Image *image) % MagickBooleanType AutoGammaImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: The image to auto-level % % o channel: The channels to auto-level. If the special 'SyncChannels' % flag is set all given channels is adjusted in the same way using the % mean average of those channels. 
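%
%  The gamma applied is log(mean*QuantumScale)/log(0.5).  For example, a channel
%  whose mean sits at one quarter of QuantumRange yields log(0.25)/log(0.5) = 2.0,
%  which is then passed to LevelImageChannel() to pull that mean toward mid-gray.
%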
% */ MagickExport MagickBooleanType AutoGammaImage(Image *image) { return(AutoGammaImageChannel(image,DefaultChannels)); } MagickExport MagickBooleanType AutoGammaImageChannel(Image *image, const ChannelType channel) { MagickStatusType status; double mean,sans,gamma,logmean; logmean=log(0.5); if ((channel & SyncChannels) != 0 ) { /* Apply gamma correction equally accross all given channels */ (void) GetImageChannelMean(image,channel,&mean,&sans,&image->exception); gamma=log(mean*QuantumScale)/logmean; return LevelImageChannel(image, channel, 0.0, (double)QuantumRange, gamma); } /* auto-gamma each channel separateally */ status = MagickTrue; if ((channel & RedChannel) != 0) { (void) GetImageChannelMean(image,RedChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status = status && LevelImageChannel(image, RedChannel, 0.0, (double)QuantumRange, gamma); } if ((channel & GreenChannel) != 0) { (void) GetImageChannelMean(image,GreenChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status = status && LevelImageChannel(image, GreenChannel, 0.0, (double)QuantumRange, gamma); } if ((channel & BlueChannel) != 0) { (void) GetImageChannelMean(image,BlueChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status = status && LevelImageChannel(image, BlueChannel, 0.0, (double)QuantumRange, gamma); } if (((channel & OpacityChannel) != 0) && (image->matte == MagickTrue)) { (void) GetImageChannelMean(image,OpacityChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status = status && LevelImageChannel(image, OpacityChannel, 0.0, (double)QuantumRange, gamma); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { (void) GetImageChannelMean(image,IndexChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status = status && LevelImageChannel(image, IndexChannel, 0.0, (double)QuantumRange, gamma); } return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoLevelImage() adjusts the levels of a particular image channel by % scaling the minimum and maximum values to the full quantum range. % % The format of the LevelImage method is: % % MagickBooleanType AutoLevelImage(Image *image) % MagickBooleanType AutoLevelImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: The image to auto-level % % o channel: The channels to auto-level. If the special 'SyncChannels' % flag is set the min/max/mean value of all given channels is used for % all given channels, to all channels in the same way. % */ MagickExport MagickBooleanType AutoLevelImage(Image *image) { return(AutoLevelImageChannel(image,DefaultChannels)); } MagickExport MagickBooleanType AutoLevelImageChannel(Image *image, const ChannelType channel) { /* This is simply a convenience function around a Min/Max Histogram Stretch */ return MinMaxStretchImage(image, channel, 0.0, 0.0); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B r i g h t n e s s C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BrightnessContrastImage() changes the brightness and/or contrast of an % image. 
It converts the brightness and contrast parameters into slope and % intercept and calls a polynomical function to apply to the image. % % The format of the BrightnessContrastImage method is: % % MagickBooleanType BrightnessContrastImage(Image *image, % const double brightness,const double contrast) % MagickBooleanType BrightnessContrastImageChannel(Image *image, % const ChannelType channel,const double brightness, % const double contrast) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o brightness: the brightness percent (-100 .. 100). % % o contrast: the contrast percent (-100 .. 100). % */ MagickExport MagickBooleanType BrightnessContrastImage(Image *image, const double brightness,const double contrast) { MagickBooleanType status; status=BrightnessContrastImageChannel(image,DefaultChannels,brightness, contrast); return(status); } MagickExport MagickBooleanType BrightnessContrastImageChannel(Image *image, const ChannelType channel,const double brightness,const double contrast) { #define BrightnessContastImageTag "BrightnessContast/Image" double alpha, intercept, coefficients[2], slope; MagickBooleanType status; /* Compute slope and intercept. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); alpha=contrast; slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0)); if (slope < 0.0) slope=0.0; intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope); coefficients[0]=slope; coefficients[1]=intercept; status=FunctionImageChannel(image,channel,PolynomialFunction,2,coefficients, &image->exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r D e c i s i o n L i s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorDecisionListImage() accepts a lightweight Color Correction Collection % (CCC) file which solely contains one or more color corrections and applies % the correction to the image. Here is a sample CCC file: % % <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2"> % <ColorCorrection id="cc03345"> % <SOPNode> % <Slope> 0.9 1.2 0.5 </Slope> % <Offset> 0.4 -0.5 0.6 </Offset> % <Power> 1.0 0.8 1.5 </Power> % </SOPNode> % <SATNode> % <Saturation> 0.85 </Saturation> % </SATNode> % </ColorCorrection> % </ColorCorrectionCollection> % % which includes the slop, offset, and power for each of the RGB channels % as well as the saturation. % % The format of the ColorDecisionListImage method is: % % MagickBooleanType ColorDecisionListImage(Image *image, % const char *color_correction_collection) % % A description of each parameter follows: % % o image: the image. % % o color_correction_collection: the color correction collection in XML. 
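%
%  Each channel value is corrected as out = (in*slope + offset)^power, and the
%  result is then blended with the pixel's Rec. 709 luma according to the
%  saturation value.
%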
% */ MagickExport MagickBooleanType ColorDecisionListImage(Image *image, const char *color_correction_collection) { #define ColorDecisionListCorrectImageTag "ColorDecisionList/Image" typedef struct _Correction { double slope, offset, power; } Correction; typedef struct _ColorCorrection { Correction red, green, blue; double saturation; } ColorCorrection; CacheView *image_view; char token[MaxTextExtent]; ColorCorrection color_correction; const char *content, *p; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; PixelPacket *cdl_map; register ssize_t i; ssize_t y; XMLTreeInfo *cc, *ccc, *sat, *sop; /* Allocate and initialize cdl maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (color_correction_collection == (const char *) NULL) return(MagickFalse); ccc=NewXMLTree((const char *) color_correction_collection,&image->exception); if (ccc == (XMLTreeInfo *) NULL) return(MagickFalse); cc=GetXMLTreeChild(ccc,"ColorCorrection"); if (cc == (XMLTreeInfo *) NULL) { ccc=DestroyXMLTree(ccc); return(MagickFalse); } color_correction.red.slope=1.0; color_correction.red.offset=0.0; color_correction.red.power=1.0; color_correction.green.slope=1.0; color_correction.green.offset=0.0; color_correction.green.power=1.0; color_correction.blue.slope=1.0; color_correction.blue.offset=0.0; color_correction.blue.power=1.0; color_correction.saturation=0.0; sop=GetXMLTreeChild(cc,"SOPNode"); if (sop != (XMLTreeInfo *) NULL) { XMLTreeInfo *offset, *power, *slope; slope=GetXMLTreeChild(sop,"Slope"); if (slope != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(slope); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { GetMagickToken(p,&p,token); if (*token == ',') GetMagickToken(p,&p,token); switch (i) { case 0: { color_correction.red.slope=InterpretLocaleValue(token, (char **) NULL); break; } case 1: { color_correction.green.slope=InterpretLocaleValue(token, (char **) NULL); break; } case 2: { color_correction.blue.slope=InterpretLocaleValue(token, (char **) NULL); break; } } } } offset=GetXMLTreeChild(sop,"Offset"); if (offset != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(offset); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { GetMagickToken(p,&p,token); if (*token == ',') GetMagickToken(p,&p,token); switch (i) { case 0: { color_correction.red.offset=InterpretLocaleValue(token, (char **) NULL); break; } case 1: { color_correction.green.offset=InterpretLocaleValue(token, (char **) NULL); break; } case 2: { color_correction.blue.offset=InterpretLocaleValue(token, (char **) NULL); break; } } } } power=GetXMLTreeChild(sop,"Power"); if (power != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(power); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { GetMagickToken(p,&p,token); if (*token == ',') GetMagickToken(p,&p,token); switch (i) { case 0: { color_correction.red.power=InterpretLocaleValue(token, (char **) NULL); break; } case 1: { color_correction.green.power=InterpretLocaleValue(token, (char **) NULL); break; } case 2: { color_correction.blue.power=InterpretLocaleValue(token, (char **) NULL); break; } } } } } sat=GetXMLTreeChild(cc,"SATNode"); if (sat != (XMLTreeInfo *) NULL) { XMLTreeInfo *saturation; saturation=GetXMLTreeChild(sat,"Saturation"); if (saturation != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(saturation); p=(const char *) content; GetMagickToken(p,&p,token); 
color_correction.saturation=InterpretLocaleValue(token, (char **) NULL); } } ccc=DestroyXMLTree(ccc); if (image->debug != MagickFalse) { (void) LogMagickEvent(TransformEvent,GetMagickModule(), " Color Correction Collection:"); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.slope: %g",color_correction.red.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.offset: %g",color_correction.red.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.power: %g",color_correction.red.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.slope: %g",color_correction.green.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.offset: %g",color_correction.green.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.power: %g",color_correction.green.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.slope: %g",color_correction.blue.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.offset: %g",color_correction.blue.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.power: %g",color_correction.blue.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.saturation: %g",color_correction.saturation); } cdl_map=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map)); if (cdl_map == (PixelPacket *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { cdl_map[i].red=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+ color_correction.red.offset,color_correction.red.power))))); cdl_map[i].green=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+ color_correction.green.offset,color_correction.green.power))))); cdl_map[i].blue=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+ color_correction.blue.offset,color_correction.blue.power))))); } if (image->storage_class == PseudoClass) { /* Apply transfer function to colormap. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (i=0; i < (ssize_t) image->colors; i++) { double luma; luma=0.2126*image->colormap[i].red+0.7152*image->colormap[i].green+ 0.0722*image->colormap[i].blue; image->colormap[i].red=ClampToQuantum(luma+color_correction.saturation* cdl_map[ScaleQuantumToMap(image->colormap[i].red)].red-luma); image->colormap[i].green=ClampToQuantum(luma+ color_correction.saturation*cdl_map[ScaleQuantumToMap( image->colormap[i].green)].green-luma); image->colormap[i].blue=ClampToQuantum(luma+color_correction.saturation* cdl_map[ScaleQuantumToMap(image->colormap[i].blue)].blue-luma); } } /* Apply transfer function to image. 
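    Each pixel's red, green, and blue values are pushed through the
    slope/offset/power lookup table built above and then blended with the
    pixel's Rec. 709 luma as luma + saturation*(cdl - luma).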
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { double luma; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { luma=0.2126*GetPixelRed(q)+0.7152*GetPixelGreen(q)+ 0.0722*GetPixelBlue(q); SetPixelRed(q,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelRed(q))].red-luma))); SetPixelGreen(q,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelGreen(q))].green-luma))); SetPixelBlue(q,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelBlue(q))].blue-luma))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ColorDecisionListImageChannel) #endif proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag, progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); cdl_map=(PixelPacket *) RelinquishMagickMemory(cdl_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l u t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClutImage() replaces each color value in the given image, by using it as an % index to lookup a replacement color value in a Color Look UP Table in the % form of an image. The values are extracted along a diagonal of the CLUT % image so either a horizontal or vertial gradient image can be used. % % Typically this is used to either re-color a gray-scale image according to a % color gradient in the CLUT image, or to perform a freeform histogram % (level) adjustment according to the (typically gray-scale) gradient in the % CLUT image. % % When the 'channel' mask includes the matte/alpha transparency channel but % one image has no such channel it is assumed that that image is a simple % gray-scale image that will effect the alpha channel values, either for % gray-scale coloring (with transparent or semi-transparent colors), or % a histogram adjustment of existing alpha channel values. If both images % have matte channels, direct and normal indexing is applied, which is rarely % used. % % The format of the ClutImage method is: % % MagickBooleanType ClutImage(Image *image,Image *clut_image) % MagickBooleanType ClutImageChannel(Image *image, % const ChannelType channel,Image *clut_image) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o clut_image: the color lookup table image for replacement color values. % % o channel: the channel. 
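%
%  The implementation samples the CLUT image once along its diagonal into a
%  lookup table of MaxMap+1 interpolated colors, then replaces each selected
%  channel value by indexing that table.
%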
% */ MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image) { return(ClutImageChannel(image,DefaultChannels,clut_image)); } MagickExport MagickBooleanType ClutImageChannel(Image *image, const ChannelType channel,const Image *clut_image) { #define ClutImageTag "Clut/Image" CacheView *clut_view, *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket *clut_map; register ssize_t i; ssize_t adjust, y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(clut_image != (Image *) NULL); assert(clut_image->signature == MagickSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); clut_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*clut_map)); if (clut_map == (MagickPixelPacket *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Clut image. */ status=MagickTrue; progress=0; adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1); exception=(&image->exception); clut_view=AcquireCacheView(clut_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { GetMagickPixelPacket(clut_image,clut_map+i); (void) InterpolateMagickPixelPacket(clut_image,clut_view, UndefinedInterpolatePixel,QuantumScale*i*(clut_image->columns-adjust), QuantumScale*i*(clut_image->rows-adjust),clut_map+i,exception); } clut_view=DestroyCacheView(clut_view); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); GetMagickPixelPacket(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampPixelRed(clut_map+ ScaleQuantumToMap(GetPixelRed(q)))); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampPixelGreen(clut_map+ ScaleQuantumToMap(GetPixelGreen(q)))); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampPixelBlue(clut_map+ ScaleQuantumToMap(GetPixelBlue(q)))); if ((channel & OpacityChannel) != 0) { if (clut_image->matte == MagickFalse) SetPixelAlpha(q,MagickPixelIntensityToQuantum(clut_map+ ScaleQuantumToMap((Quantum) GetPixelAlpha(q)))); else if (image->matte == MagickFalse) SetPixelOpacity(q,ClampPixelOpacity(clut_map+ ScaleQuantumToMap((Quantum) MagickPixelIntensity(&pixel)))); else SetPixelOpacity(q,ClampPixelOpacity( clut_map+ScaleQuantumToMap(GetPixelOpacity(q)))); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum((clut_map+(ssize_t) GetPixelIndex(indexes+x))->index)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ClutImageChannel) #endif 
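      /* The critical section above serializes calls to the user-supplied
         progress monitor, which need not be thread-safe. */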
proceed=SetImageProgress(image,ClutImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); clut_map=(MagickPixelPacket *) RelinquishMagickMemory(clut_map); if ((clut_image->matte != MagickFalse) && ((channel & OpacityChannel) != 0)) (void) SetImageAlphaChannel(image,ActivateAlphaChannel); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ContrastImage() enhances the intensity differences between the lighter and % darker elements of the image. Set sharpen to a MagickTrue to increase the % image contrast otherwise the contrast is reduced. % % The format of the ContrastImage method is: % % MagickBooleanType ContrastImage(Image *image, % const MagickBooleanType sharpen) % % A description of each parameter follows: % % o image: the image. % % o sharpen: Increase or decrease image contrast. % */ static void Contrast(const int sign,Quantum *red,Quantum *green,Quantum *blue) { double brightness, hue, saturation; /* Enhance contrast: dark color become darker, light color become lighter. */ assert(red != (Quantum *) NULL); assert(green != (Quantum *) NULL); assert(blue != (Quantum *) NULL); hue=0.0; saturation=0.0; brightness=0.0; ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)- brightness); if (brightness > 1.0) brightness=1.0; else if (brightness < 0.0) brightness=0.0; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } MagickExport MagickBooleanType ContrastImage(Image *image, const MagickBooleanType sharpen) { #define ContrastImageTag "Contrast/Image" CacheView *image_view; ExceptionInfo *exception; int sign; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); sign=sharpen != MagickFalse ? 1 : -1; if (image->storage_class == PseudoClass) { /* Contrast enhance colormap. */ for (i=0; i < (ssize_t) image->colors; i++) Contrast(sign,&image->colormap[i].red,&image->colormap[i].green, &image->colormap[i].blue); } /* Contrast enhance image. 
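    Each pixel is converted to HSB, its brightness is nudged along a sine curve
    toward black or white depending on whether sharpen is set, and the result
    is converted back to RGB; hue and saturation are left untouched.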
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum blue, green, red; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=GetPixelRed(q); green=GetPixelGreen(q); blue=GetPixelBlue(q); Contrast(sign,&red,&green,&blue); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ContrastImage) #endif proceed=SetImageProgress(image,ContrastImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n t r a s t S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ContrastStretchImage() is a simple image enhancement technique that attempts % to improve the contrast in an image by `stretching' the range of intensity % values it contains to span a desired range of values. It differs from the % more sophisticated histogram equalization in that it can only apply a % linear scaling function to the image pixel values. As a result the % `enhancement' is less harsh. % % The format of the ContrastStretchImage method is: % % MagickBooleanType ContrastStretchImage(Image *image, % const char *levels) % MagickBooleanType ContrastStretchImageChannel(Image *image, % const size_t channel,const double black_point, % const double white_point) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o black_point: the black point. % % o white_point: the white point. % % o levels: Specify the levels where the black and white points have the % range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.). % */ MagickExport MagickBooleanType ContrastStretchImage(Image *image, const char *levels) { double black_point, white_point; GeometryInfo geometry_info; MagickBooleanType status; MagickStatusType flags; /* Parse levels. 
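    Rho supplies the black point and sigma the white point; when no sigma is
    given the white point defaults to the total pixel count minus the black
    point, and a trailing '%' scales both values.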
*/ if (levels == (char *) NULL) return(MagickFalse); flags=ParseGeometry(levels,&geometry_info); black_point=geometry_info.rho; white_point=(double) image->columns*image->rows; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; if ((flags & PercentValue) != 0) { black_point*=(double) QuantumRange/100.0; white_point*=(double) QuantumRange/100.0; } if ((flags & SigmaValue) == 0) white_point=(double) image->columns*image->rows-black_point; status=ContrastStretchImageChannel(image,DefaultChannels,black_point, white_point); return(status); } MagickExport MagickBooleanType ContrastStretchImageChannel(Image *image, const ChannelType channel,const double black_point,const double white_point) { #define MaxRange(color) ((MagickRealType) ScaleQuantumToMap((Quantum) (color))) #define ContrastStretchImageTag "ContrastStretch/Image" CacheView *image_view; double intensity; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket black, *histogram, *stretch_map, white; register ssize_t i; ssize_t y; /* Allocate histogram and stretch map. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram)); stretch_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*stretch_map)); if ((histogram == (MagickPixelPacket *) NULL) || (stretch_map == (MagickPixelPacket *) NULL)) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Form histogram. */ status=MagickTrue; exception=(&image->exception); (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram)); image_view=AcquireCacheView(image); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *restrict p; register IndexPacket *restrict indexes; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); if (channel == DefaultChannels) for (x=0; x < (ssize_t) image->columns; x++) { Quantum intensity; intensity=PixelIntensityToQuantum(p); histogram[ScaleQuantumToMap(intensity)].red++; histogram[ScaleQuantumToMap(intensity)].green++; histogram[ScaleQuantumToMap(intensity)].blue++; histogram[ScaleQuantumToMap(intensity)].index++; p++; } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) histogram[ScaleQuantumToMap(GetPixelRed(p))].red++; if ((channel & GreenChannel) != 0) histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++; if ((channel & BlueChannel) != 0) histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++; if ((channel & OpacityChannel) != 0) histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) histogram[ScaleQuantumToMap(GetPixelIndex( indexes+x))].index++; p++; } } /* Find the histogram boundaries by locating the black/white levels. 
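    For each selected channel the counts are accumulated upward from 0 until
    they exceed the requested black point, and downward from MaxMap until they
    exceed the pixel count minus the white point; the two crossing indices
    become that channel's black and white levels.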
*/ black.red=0.0; white.red=MaxRange(QuantumRange); if ((channel & RedChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].red; if (intensity > black_point) break; } black.red=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].red; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.red=(MagickRealType) i; } black.green=0.0; white.green=MaxRange(QuantumRange); if ((channel & GreenChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].green; if (intensity > black_point) break; } black.green=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].green; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.green=(MagickRealType) i; } black.blue=0.0; white.blue=MaxRange(QuantumRange); if ((channel & BlueChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].blue; if (intensity > black_point) break; } black.blue=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].blue; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.blue=(MagickRealType) i; } black.opacity=0.0; white.opacity=MaxRange(QuantumRange); if ((channel & OpacityChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].opacity; if (intensity > black_point) break; } black.opacity=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].opacity; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.opacity=(MagickRealType) i; } black.index=0.0; white.index=MaxRange(QuantumRange); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].index; if (intensity > black_point) break; } black.index=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].index; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.index=(MagickRealType) i; } histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); /* Stretch the histogram to create the stretched image mapping. 
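    Entries below the black level map to 0, entries above the white level map
    to QuantumRange, and everything in between is ramped linearly from one to
    the other.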
*/ (void) ResetMagickMemory(stretch_map,0,(MaxMap+1)*sizeof(*stretch_map)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { if ((channel & RedChannel) != 0) { if (i < (ssize_t) black.red) stretch_map[i].red=0.0; else if (i > (ssize_t) white.red) stretch_map[i].red=(MagickRealType) QuantumRange; else if (black.red != white.red) stretch_map[i].red=(MagickRealType) ScaleMapToQuantum( (MagickRealType) (MaxMap*(i-black.red)/(white.red-black.red))); } if ((channel & GreenChannel) != 0) { if (i < (ssize_t) black.green) stretch_map[i].green=0.0; else if (i > (ssize_t) white.green) stretch_map[i].green=(MagickRealType) QuantumRange; else if (black.green != white.green) stretch_map[i].green=(MagickRealType) ScaleMapToQuantum( (MagickRealType) (MaxMap*(i-black.green)/(white.green- black.green))); } if ((channel & BlueChannel) != 0) { if (i < (ssize_t) black.blue) stretch_map[i].blue=0.0; else if (i > (ssize_t) white.blue) stretch_map[i].blue=(MagickRealType) QuantumRange; else if (black.blue != white.blue) stretch_map[i].blue=(MagickRealType) ScaleMapToQuantum( (MagickRealType) (MaxMap*(i-black.blue)/(white.blue- black.blue))); } if ((channel & OpacityChannel) != 0) { if (i < (ssize_t) black.opacity) stretch_map[i].opacity=0.0; else if (i > (ssize_t) white.opacity) stretch_map[i].opacity=(MagickRealType) QuantumRange; else if (black.opacity != white.opacity) stretch_map[i].opacity=(MagickRealType) ScaleMapToQuantum( (MagickRealType) (MaxMap*(i-black.opacity)/(white.opacity- black.opacity))); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if (i < (ssize_t) black.index) stretch_map[i].index=0.0; else if (i > (ssize_t) white.index) stretch_map[i].index=(MagickRealType) QuantumRange; else if (black.index != white.index) stretch_map[i].index=(MagickRealType) ScaleMapToQuantum( (MagickRealType) (MaxMap*(i-black.index)/(white.index- black.index))); } } /* Stretch the image. */ if (((channel & OpacityChannel) != 0) || (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))) image->storage_class=DirectClass; if (image->storage_class == PseudoClass) { /* Stretch colormap. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) { if (black.red != white.red) image->colormap[i].red=ClampToQuantum(stretch_map[ ScaleQuantumToMap(image->colormap[i].red)].red); } if ((channel & GreenChannel) != 0) { if (black.green != white.green) image->colormap[i].green=ClampToQuantum(stretch_map[ ScaleQuantumToMap(image->colormap[i].green)].green); } if ((channel & BlueChannel) != 0) { if (black.blue != white.blue) image->colormap[i].blue=ClampToQuantum(stretch_map[ ScaleQuantumToMap(image->colormap[i].blue)].blue); } if ((channel & OpacityChannel) != 0) { if (black.opacity != white.opacity) image->colormap[i].opacity=ClampToQuantum(stretch_map[ ScaleQuantumToMap(image->colormap[i].opacity)].opacity); } } } /* Stretch image. 
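    Every pixel of each selected channel (and the CMYK index plane, when
    present) is pushed through stretch_map; channels whose black and white
    levels coincide are left untouched.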
*/ status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) { if (black.red != white.red) SetPixelRed(q,ClampToQuantum(stretch_map[ ScaleQuantumToMap(GetPixelRed(q))].red)); } if ((channel & GreenChannel) != 0) { if (black.green != white.green) SetPixelGreen(q,ClampToQuantum(stretch_map[ ScaleQuantumToMap(GetPixelGreen(q))].green)); } if ((channel & BlueChannel) != 0) { if (black.blue != white.blue) SetPixelBlue(q,ClampToQuantum(stretch_map[ ScaleQuantumToMap(GetPixelBlue(q))].blue)); } if ((channel & OpacityChannel) != 0) { if (black.opacity != white.opacity) SetPixelOpacity(q,ClampToQuantum(stretch_map[ ScaleQuantumToMap(GetPixelOpacity(q))].opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if (black.index != white.index) SetPixelIndex(indexes+x,ClampToQuantum(stretch_map[ ScaleQuantumToMap(GetPixelIndex(indexes+x))].index)); } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ContrastStretchImageChannel) #endif proceed=SetImageProgress(image,ContrastStretchImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); stretch_map=(MagickPixelPacket *) RelinquishMagickMemory(stretch_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E n h a n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EnhanceImage() applies a digital filter that improves the quality of a % noisy image. % % The format of the EnhanceImage method is: % % Image *EnhanceImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
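%
%  The filter averages each pixel with its 5x5 neighborhood: only neighbors
%  whose color distance from the center pixel falls below a fixed threshold
%  contribute, with weights that peak at 80 in the center and fall off to 5 at
%  the corners.
%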
% */ MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception) { #define Enhance(weight) \ mean=((MagickRealType) GetPixelRed(r)+pixel.red)/2; \ distance=(MagickRealType) GetPixelRed(r)-(MagickRealType) pixel.red; \ distance_squared=QuantumScale*(2.0*((MagickRealType) QuantumRange+1.0)+ \ mean)*distance*distance; \ mean=((MagickRealType) GetPixelGreen(r)+pixel.green)/2; \ distance=(MagickRealType) GetPixelGreen(r)-(MagickRealType) \ pixel.green; \ distance_squared+=4.0*distance*distance; \ mean=((MagickRealType) GetPixelBlue(r)+pixel.blue)/2; \ distance=(MagickRealType) GetPixelBlue(r)-(MagickRealType) \ pixel.blue; \ distance_squared+=QuantumScale*(3.0*((MagickRealType) \ QuantumRange+1.0)-1.0-mean)*distance*distance; \ mean=((MagickRealType) r->opacity+pixel.opacity)/2; \ distance=(MagickRealType) r->opacity-(MagickRealType) pixel.opacity; \ distance_squared+=QuantumScale*(3.0*((MagickRealType) \ QuantumRange+1.0)-1.0-mean)*distance*distance; \ if (distance_squared < ((MagickRealType) QuantumRange*(MagickRealType) \ QuantumRange/25.0f)) \ { \ aggregate.red+=(weight)*GetPixelRed(r); \ aggregate.green+=(weight)*GetPixelGreen(r); \ aggregate.blue+=(weight)*GetPixelBlue(r); \ aggregate.opacity+=(weight)*GetPixelOpacity(r); \ total_weight+=(weight); \ } \ r++; #define EnhanceImageTag "Enhance/Image" CacheView *enhance_view, *image_view; Image *enhance_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; ssize_t y; /* Initialize enhanced image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((image->columns < 5) || (image->rows < 5)) return((Image *) NULL); enhance_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (enhance_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(enhance_image,DirectClass) == MagickFalse) { InheritException(exception,&enhance_image->exception); enhance_image=DestroyImage(enhance_image); return((Image *) NULL); } /* Enhance image. */ status=MagickTrue; progress=0; (void) ResetMagickMemory(&zero,0,sizeof(zero)); image_view=AcquireCacheView(image); enhance_view=AcquireCacheView(enhance_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *restrict p; register PixelPacket *restrict q; register ssize_t x; /* Read another scan line. */ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception); q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickPixelPacket aggregate; MagickRealType distance, distance_squared, mean, total_weight; PixelPacket pixel; register const PixelPacket *restrict r; /* Compute weighted average of target pixel color components. 
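        The Enhance() macro accumulates a neighbor only when its weighted color
        distance squared from the center pixel is below (QuantumRange/5)^2;
        total_weight records the weights actually accepted so the final
        division normalizes correctly.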
*/ aggregate=zero; total_weight=0.0; r=p+2*(image->columns+4)+2; pixel=(*r); r=p; Enhance(5.0); Enhance(8.0); Enhance(10.0); Enhance(8.0); Enhance(5.0); r=p+(image->columns+4); Enhance(8.0); Enhance(20.0); Enhance(40.0); Enhance(20.0); Enhance(8.0); r=p+2*(image->columns+4); Enhance(10.0); Enhance(40.0); Enhance(80.0); Enhance(40.0); Enhance(10.0); r=p+3*(image->columns+4); Enhance(8.0); Enhance(20.0); Enhance(40.0); Enhance(20.0); Enhance(8.0); r=p+4*(image->columns+4); Enhance(5.0); Enhance(8.0); Enhance(10.0); Enhance(8.0); Enhance(5.0); SetPixelRed(q,(aggregate.red+(total_weight/2)-1)/total_weight); SetPixelGreen(q,(aggregate.green+(total_weight/2)-1)/ total_weight); SetPixelBlue(q,(aggregate.blue+(total_weight/2)-1)/total_weight); SetPixelOpacity(q,(aggregate.opacity+(total_weight/2)-1)/ total_weight); p++; q++; } if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EnhanceImage) #endif proceed=SetImageProgress(image,EnhanceImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } enhance_view=DestroyCacheView(enhance_view); image_view=DestroyCacheView(image_view); return(enhance_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E q u a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EqualizeImage() applies a histogram equalization to the image. % % The format of the EqualizeImage method is: % % MagickBooleanType EqualizeImage(Image *image) % MagickBooleanType EqualizeImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % */ MagickExport MagickBooleanType EqualizeImage(Image *image) { return(EqualizeImageChannel(image,DefaultChannels)); } MagickExport MagickBooleanType EqualizeImageChannel(Image *image, const ChannelType channel) { #define EqualizeImageTag "Equalize/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket black, *equalize_map, *histogram, intensity, *map, white; register ssize_t i; ssize_t y; /* Allocate and initialize histogram arrays. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); equalize_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*equalize_map)); histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram)); map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*map)); if ((equalize_map == (MagickPixelPacket *) NULL) || (histogram == (MagickPixelPacket *) NULL) || (map == (MagickPixelPacket *) NULL)) { if (map != (MagickPixelPacket *) NULL) map=(MagickPixelPacket *) RelinquishMagickMemory(map); if (histogram != (MagickPixelPacket *) NULL) histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); if (equalize_map != (MagickPixelPacket *) NULL) equalize_map=(MagickPixelPacket *) RelinquishMagickMemory(equalize_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Form histogram. 
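    A single pass over the image counts how many pixels fall at each quantum
    level for every selected channel (and for the CMYK index channel when
    applicable).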
*/ (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram)); exception=(&image->exception); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) histogram[ScaleQuantumToMap(GetPixelRed(p))].red++; if ((channel & GreenChannel) != 0) histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++; if ((channel & BlueChannel) != 0) histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++; if ((channel & OpacityChannel) != 0) histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++; p++; } } /* Integrate the histogram to get the equalization map. */ (void) ResetMagickMemory(&intensity,0,sizeof(intensity)); for (i=0; i <= (ssize_t) MaxMap; i++) { if ((channel & RedChannel) != 0) intensity.red+=histogram[i].red; if ((channel & GreenChannel) != 0) intensity.green+=histogram[i].green; if ((channel & BlueChannel) != 0) intensity.blue+=histogram[i].blue; if ((channel & OpacityChannel) != 0) intensity.opacity+=histogram[i].opacity; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) intensity.index+=histogram[i].index; map[i]=intensity; } black=map[0]; white=map[(int) MaxMap]; (void) ResetMagickMemory(equalize_map,0,(MaxMap+1)*sizeof(*equalize_map)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { if (((channel & RedChannel) != 0) && (white.red != black.red)) equalize_map[i].red=(MagickRealType) ScaleMapToQuantum((MagickRealType) ((MaxMap*(map[i].red-black.red))/(white.red-black.red))); if (((channel & GreenChannel) != 0) && (white.green != black.green)) equalize_map[i].green=(MagickRealType) ScaleMapToQuantum((MagickRealType) ((MaxMap*(map[i].green-black.green))/(white.green-black.green))); if (((channel & BlueChannel) != 0) && (white.blue != black.blue)) equalize_map[i].blue=(MagickRealType) ScaleMapToQuantum((MagickRealType) ((MaxMap*(map[i].blue-black.blue))/(white.blue-black.blue))); if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity)) equalize_map[i].opacity=(MagickRealType) ScaleMapToQuantum( (MagickRealType) ((MaxMap*(map[i].opacity-black.opacity))/ (white.opacity-black.opacity))); if ((((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) && (white.index != black.index)) equalize_map[i].index=(MagickRealType) ScaleMapToQuantum((MagickRealType) ((MaxMap*(map[i].index-black.index))/(white.index-black.index))); } histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); map=(MagickPixelPacket *) RelinquishMagickMemory(map); if (image->storage_class == PseudoClass) { /* Equalize colormap. 
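    Colormap entries of PseudoClass images are remapped through equalize_map,
    the scaled cumulative histogram built above, so the stored colors are
    equalized directly.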
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (i=0; i < (ssize_t) image->colors; i++) { if (((channel & RedChannel) != 0) && (white.red != black.red)) image->colormap[i].red=ClampToQuantum(equalize_map[ ScaleQuantumToMap(image->colormap[i].red)].red); if (((channel & GreenChannel) != 0) && (white.green != black.green)) image->colormap[i].green=ClampToQuantum(equalize_map[ ScaleQuantumToMap(image->colormap[i].green)].green); if (((channel & BlueChannel) != 0) && (white.blue != black.blue)) image->colormap[i].blue=ClampToQuantum(equalize_map[ ScaleQuantumToMap(image->colormap[i].blue)].blue); if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity)) image->colormap[i].opacity=ClampToQuantum(equalize_map[ ScaleQuantumToMap(image->colormap[i].opacity)].opacity); } } /* Equalize image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if (((channel & RedChannel) != 0) && (white.red != black.red)) SetPixelRed(q,ClampToQuantum(equalize_map[ ScaleQuantumToMap(GetPixelRed(q))].red)); if (((channel & GreenChannel) != 0) && (white.green != black.green)) SetPixelGreen(q,ClampToQuantum(equalize_map[ ScaleQuantumToMap(GetPixelGreen(q))].green)); if (((channel & BlueChannel) != 0) && (white.blue != black.blue)) SetPixelBlue(q,ClampToQuantum(equalize_map[ ScaleQuantumToMap(GetPixelBlue(q))].blue)); if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity)) SetPixelOpacity(q,ClampToQuantum(equalize_map[ ScaleQuantumToMap(GetPixelOpacity(q))].opacity)); if ((((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) && (white.index != black.index)) SetPixelIndex(indexes+x,ClampToQuantum(equalize_map[ ScaleQuantumToMap(GetPixelIndex(indexes+x))].index)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EqualizeImageChannel) #endif proceed=SetImageProgress(image,EqualizeImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); equalize_map=(MagickPixelPacket *) RelinquishMagickMemory(equalize_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GammaImage() gamma-corrects a particular image channel. The same % image viewed on different devices will have perceptual differences in the % way the image's intensities are represented on the screen. Specify % individual gamma levels for the red, green, and blue channels, or adjust % all three with the gamma parameter. Values typically range from 0.8 to 2.3. 
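% Each quantum value q is remapped to QuantumRange*pow(q/QuantumRange,1.0/gamma),
% so a gamma of 2.0, for example, lifts a quarter-scale input to half scale.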
% % You can also reduce the influence of a particular channel with a gamma % value of 0. % % The format of the GammaImage method is: % % MagickBooleanType GammaImage(Image *image,const char *level) % MagickBooleanType GammaImageChannel(Image *image, % const ChannelType channel,const double gamma) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o level: the image gamma as a string (e.g. 1.6,1.2,1.0). % % o gamma: the image gamma. % */ MagickExport MagickBooleanType GammaImage(Image *image,const char *level) { GeometryInfo geometry_info; MagickPixelPacket gamma; MagickStatusType flags, status; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (level == (char *) NULL) return(MagickFalse); flags=ParseGeometry(level,&geometry_info); gamma.red=geometry_info.rho; gamma.green=geometry_info.sigma; if ((flags & SigmaValue) == 0) gamma.green=gamma.red; gamma.blue=geometry_info.xi; if ((flags & XiValue) == 0) gamma.blue=gamma.red; if ((gamma.red == 1.0) && (gamma.green == 1.0) && (gamma.blue == 1.0)) return(MagickTrue); if ((gamma.red == gamma.green) && (gamma.green == gamma.blue)) status=GammaImageChannel(image,(const ChannelType) (RedChannel | GreenChannel | BlueChannel),(double) gamma.red); else { status=GammaImageChannel(image,RedChannel,(double) gamma.red); status|=GammaImageChannel(image,GreenChannel,(double) gamma.green); status|=GammaImageChannel(image,BlueChannel,(double) gamma.blue); } return(status != 0 ? MagickTrue : MagickFalse); } MagickExport MagickBooleanType GammaImageChannel(Image *image, const ChannelType channel,const double gamma) { #define GammaCorrectImageTag "GammaCorrect/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; Quantum *gamma_map; register ssize_t i; ssize_t y; /* Allocate and initialize gamma maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (gamma == 1.0) return(MagickTrue); gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map)); if (gamma_map == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) ResetMagickMemory(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map)); if (gamma != 0.0) #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) gamma_map[i]=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*pow((double) i/MaxMap,1.0/gamma)))); if (image->storage_class == PseudoClass) { /* Gamma-correct colormap. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) image->colormap[i].red=gamma_map[ ScaleQuantumToMap(image->colormap[i].red)]; if ((channel & GreenChannel) != 0) image->colormap[i].green=gamma_map[ ScaleQuantumToMap(image->colormap[i].green)]; if ((channel & BlueChannel) != 0) image->colormap[i].blue=gamma_map[ ScaleQuantumToMap(image->colormap[i].blue)]; if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) image->colormap[i].opacity=gamma_map[ ScaleQuantumToMap(image->colormap[i].opacity)]; else image->colormap[i].opacity=(Quantum) QuantumRange- gamma_map[ScaleQuantumToMap((Quantum) (QuantumRange- image->colormap[i].opacity))]; } } } /* Gamma-correct image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if (channel == DefaultChannels) { SetPixelRed(q,gamma_map[ScaleQuantumToMap( GetPixelRed(q))]); SetPixelGreen(q,gamma_map[ScaleQuantumToMap( GetPixelGreen(q))]); SetPixelBlue(q,gamma_map[ScaleQuantumToMap( GetPixelBlue(q))]); } else { if ((channel & RedChannel) != 0) SetPixelRed(q,gamma_map[ScaleQuantumToMap( GetPixelRed(q))]); if ((channel & GreenChannel) != 0) SetPixelGreen(q,gamma_map[ScaleQuantumToMap( GetPixelGreen(q))]); if ((channel & BlueChannel) != 0) SetPixelBlue(q,gamma_map[ScaleQuantumToMap( GetPixelBlue(q))]); if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) SetPixelOpacity(q,gamma_map[ScaleQuantumToMap( GetPixelOpacity(q))]); else SetPixelAlpha(q,gamma_map[ScaleQuantumToMap((Quantum) GetPixelAlpha(q))]); } } q++; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,gamma_map[ScaleQuantumToMap( GetPixelIndex(indexes+x))]); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GammaImageChannel) #endif proceed=SetImageProgress(image,GammaCorrectImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map); if (image->gamma != 0.0) image->gamma*=gamma; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % H a l d C l u t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % HaldClutImage() applies a Hald color lookup table to the image. A Hald % color lookup table is a 3-dimensional color cube mapped to 2 dimensions. % Create it with the HALD coder. You can apply any color transformation to % the Hald image and then use this method to apply the transform to the % image. 
% % The format of the HaldClutImage method is: % % MagickBooleanType HaldClutImage(Image *image,Image *hald_image) % MagickBooleanType HaldClutImageChannel(Image *image, % const ChannelType channel,Image *hald_image) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o hald_image: the color lookup table image for replacement color values. % % o channel: the channel. % */ static inline size_t MagickMin(const size_t x,const size_t y) { if (x < y) return(x); return(y); } MagickExport MagickBooleanType HaldClutImage(Image *image, const Image *hald_image) { return(HaldClutImageChannel(image,DefaultChannels,hald_image)); } MagickExport MagickBooleanType HaldClutImageChannel(Image *image, const ChannelType channel,const Image *hald_image) { #define HaldClutImageTag "Clut/Image" typedef struct _HaldInfo { MagickRealType x, y, z; } HaldInfo; CacheView *hald_view, *image_view; double width; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; size_t cube_size, length, level; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(hald_image != (Image *) NULL); assert(hald_image->signature == MagickSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); /* Hald clut image. */ status=MagickTrue; progress=0; length=MagickMin(hald_image->columns,hald_image->rows); for (level=2; (level*level*level) < length; level++) ; level*=level; cube_size=level*level; width=(double) hald_image->columns; GetMagickPixelPacket(hald_image,&zero); exception=(&image->exception); image_view=AcquireCacheView(image); hald_view=AcquireCacheView(hald_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { double offset; HaldInfo point; MagickPixelPacket pixel, pixel1, pixel2, pixel3, pixel4; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(hald_view); pixel=zero; pixel1=zero; pixel2=zero; pixel3=zero; pixel4=zero; for (x=0; x < (ssize_t) image->columns; x++) { point.x=QuantumScale*(level-1.0)*GetPixelRed(q); point.y=QuantumScale*(level-1.0)*GetPixelGreen(q); point.z=QuantumScale*(level-1.0)*GetPixelBlue(q); offset=point.x+level*floor(point.y)+cube_size*floor(point.z); point.x-=floor(point.x); point.y-=floor(point.y); point.z-=floor(point.z); (void) InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width), &pixel1,exception); (void) InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/ width),&pixel2,exception); MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2, pixel2.opacity,point.y,&pixel3); offset+=cube_size; (void) InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width), &pixel1,exception); (void) InterpolateMagickPixelPacket(image,hald_view, 
UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/ width),&pixel2,exception); MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2, pixel2.opacity,point.y,&pixel4); MagickPixelCompositeAreaBlend(&pixel3,pixel3.opacity,&pixel4, pixel4.opacity,point.z,&pixel); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(pixel.blue)); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(pixel.index)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_HaldClutImageChannel) #endif proceed=SetImageProgress(image,HaldClutImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } hald_view=DestroyCacheView(hald_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImage() adjusts the levels of a particular image channel by % scaling the colors falling between specified white and black points to % the full available quantum range. % % The parameters provided represent the black, and white points. The black % point specifies the darkest color in the image. Colors darker than the % black point are set to zero. White point specifies the lightest color in % the image. Colors brighter than the white point are set to the maximum % quantum value. % % If a '!' flag is given, map black and white colors to the given levels % rather than mapping those levels to black and white. See % LevelizeImageChannel() and LevelizeImageChannel(), below. % % Gamma specifies a gamma correction to apply to the image. % % The format of the LevelImage method is: % % MagickBooleanType LevelImage(Image *image,const char *levels) % % A description of each parameter follows: % % o image: the image. % % o levels: Specify the levels where the black and white points have the % range of 0-QuantumRange, and gamma has the range 0-10 (e.g. 10x90%+2). % A '!' flag inverts the re-mapping. % */ MagickExport MagickBooleanType LevelImage(Image *image,const char *levels) { double black_point, gamma, white_point; GeometryInfo geometry_info; MagickBooleanType status; MagickStatusType flags; /* Parse levels. 
*/ if (levels == (char *) NULL) return(MagickFalse); flags=ParseGeometry(levels,&geometry_info); black_point=geometry_info.rho; white_point=(double) QuantumRange; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; gamma=1.0; if ((flags & XiValue) != 0) gamma=geometry_info.xi; if ((flags & PercentValue) != 0) { black_point*=(double) image->columns*image->rows/100.0; white_point*=(double) image->columns*image->rows/100.0; } if ((flags & SigmaValue) == 0) white_point=(double) QuantumRange-black_point; if ((flags & AspectValue ) == 0) status=LevelImageChannel(image,DefaultChannels,black_point,white_point, gamma); else status=LevelizeImage(image,black_point,white_point,gamma); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelizeImage() applies the normal level operation to the image, spreading % out the values between the black and white points over the entire range of % values. Gamma correction is also applied after the values has been mapped. % % It is typically used to improve image contrast, or to provide a controlled % linear threshold for the image. If the black and white points are set to % the minimum and maximum values found in the image, the image can be % normalized. or by swapping black and white values, negate the image. % % The format of the LevelizeImage method is: % % MagickBooleanType LevelizeImage(Image *image,const double black_point, % const double white_point,const double gamma) % MagickBooleanType LevelizeImageChannel(Image *image, % const ChannelType channel,const double black_point, % const double white_point,const double gamma) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o black_point: The level which is to be mapped to zero (black) % % o white_point: The level which is to be mapped to QuantiumRange (white) % % o gamma: adjust gamma by this factor before mapping values. % use 1.0 for purely linear stretching of image color values % */ MagickExport MagickBooleanType LevelImageChannel(Image *image, const ChannelType channel,const double black_point,const double white_point, const double gamma) { #define LevelImageTag "Level/Image" #define LevelQuantum(x) (ClampToQuantum((MagickRealType) QuantumRange* \ pow(scale*((double) (x)-black_point),1.0/gamma))) CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register double scale; register ssize_t i; ssize_t y; /* Allocate and initialize levels map. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); scale=(white_point != black_point) ? 1.0/(white_point-black_point) : 1.0; if (image->storage_class == PseudoClass) #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (i=0; i < (ssize_t) image->colors; i++) { /* Level colormap. 
*/ if ((channel & RedChannel) != 0) image->colormap[i].red=LevelQuantum(image->colormap[i].red); if ((channel & GreenChannel) != 0) image->colormap[i].green=LevelQuantum(image->colormap[i].green); if ((channel & BlueChannel) != 0) image->colormap[i].blue=LevelQuantum(image->colormap[i].blue); if ((channel & OpacityChannel) != 0) image->colormap[i].opacity=LevelQuantum(image->colormap[i].opacity); } /* Level image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,LevelQuantum(GetPixelRed(q))); if ((channel & GreenChannel) != 0) SetPixelGreen(q,LevelQuantum(GetPixelGreen(q))); if ((channel & BlueChannel) != 0) SetPixelBlue(q,LevelQuantum(GetPixelBlue(q))); if (((channel & OpacityChannel) != 0) && (image->matte == MagickTrue)) SetPixelAlpha(q,LevelQuantum(GetPixelAlpha(q))); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,LevelQuantum( GetPixelIndex(indexes+x))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_LevelImageChannel) #endif proceed=SetImageProgress(image,LevelImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l i z e I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelizeImageChannel() applies the reversed LevelImage() operation to just % the specific channels specified. It compresses the full range of color % values, so that they lie between the given black and white points. Gamma is % applied before the values are mapped. % % LevelizeImageChannel() can be called with by using a +level command line % API option, or using a '!' on a -level or LevelImage() geometry string. % % It can be used for example de-contrast a greyscale image to the exact % levels specified. Or by using specific levels for each channel of an image % you can convert a gray-scale image to any linear color gradient, according % to those levels. % % The format of the LevelizeImageChannel method is: % % MagickBooleanType LevelizeImageChannel(Image *image, % const ChannelType channel,const char *levels) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o black_point: The level to map zero (black) to. % % o white_point: The level to map QuantiumRange (white) to. % % o gamma: adjust gamma by this factor before mapping values. 
% */ MagickExport MagickBooleanType LevelizeImage(Image *image, const double black_point,const double white_point,const double gamma) { MagickBooleanType status; status=LevelizeImageChannel(image,DefaultChannels,black_point,white_point, gamma); return(status); } MagickExport MagickBooleanType LevelizeImageChannel(Image *image, const ChannelType channel,const double black_point,const double white_point, const double gamma) { #define LevelizeImageTag "Levelize/Image" #define LevelizeValue(x) (ClampToQuantum(((MagickRealType) \ pow((double)(QuantumScale*(x)),1.0/gamma))*(white_point-black_point)+ \ black_point)) CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* Allocate and initialize levels map. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (i=0; i < (ssize_t) image->colors; i++) { /* Level colormap. */ if ((channel & RedChannel) != 0) image->colormap[i].red=LevelizeValue(image->colormap[i].red); if ((channel & GreenChannel) != 0) image->colormap[i].green=LevelizeValue(image->colormap[i].green); if ((channel & BlueChannel) != 0) image->colormap[i].blue=LevelizeValue(image->colormap[i].blue); if ((channel & OpacityChannel) != 0) image->colormap[i].opacity=LevelizeValue(image->colormap[i].opacity); } /* Level image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,LevelizeValue(GetPixelRed(q))); if ((channel & GreenChannel) != 0) SetPixelGreen(q,LevelizeValue(GetPixelGreen(q))); if ((channel & BlueChannel) != 0) SetPixelBlue(q,LevelizeValue(GetPixelBlue(q))); if (((channel & OpacityChannel) != 0) && (image->matte == MagickTrue)) SetPixelOpacity(q,LevelizeValue(GetPixelOpacity(q))); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,LevelizeValue( GetPixelIndex(indexes+x))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_LevelizeImageChannel) #endif proceed=SetImageProgress(image,LevelizeImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImageColor() maps the given color to "black" and "white" values, % linearly 
spreading out the colors, and level values on a channel by channel % basis, as per LevelImage(). The given colors allow you to specify % different level ranges for each of the color channels separately. % % If the boolean 'invert' is set true the image values will be modified in % the reverse direction. That is, any existing "black" and "white" colors in % the image will become the color values given, with all other values % compressed appropriately. This effectively maps a greyscale gradient into % the given color gradient. % % The format of the LevelColorsImageChannel method is: % % MagickBooleanType LevelColorsImage(Image *image, % const MagickPixelPacket *black_color, % const MagickPixelPacket *white_color,const MagickBooleanType invert) % MagickBooleanType LevelColorsImageChannel(Image *image, % const ChannelType channel,const MagickPixelPacket *black_color, % const MagickPixelPacket *white_color,const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o black_color: The color to map black to/from % % o white_color: The color to map white to/from % % o invert: if true, map to the given colors (levelize) rather than from % them (level) % */ MagickExport MagickBooleanType LevelColorsImage(Image *image, const MagickPixelPacket *black_color,const MagickPixelPacket *white_color, const MagickBooleanType invert) { MagickBooleanType status; status=LevelColorsImageChannel(image,DefaultChannels,black_color,white_color, invert); return(status); } MagickExport MagickBooleanType LevelColorsImageChannel(Image *image, const ChannelType channel,const MagickPixelPacket *black_color, const MagickPixelPacket *white_color,const MagickBooleanType invert) { MagickStatusType status; /* Allocate and initialize levels map.
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickFalse; if (invert == MagickFalse) { if ((channel & RedChannel) != 0) status|=LevelImageChannel(image,RedChannel, black_color->red,white_color->red,(double) 1.0); if ((channel & GreenChannel) != 0) status|=LevelImageChannel(image,GreenChannel, black_color->green,white_color->green,(double) 1.0); if ((channel & BlueChannel) != 0) status|=LevelImageChannel(image,BlueChannel, black_color->blue,white_color->blue,(double) 1.0); if (((channel & OpacityChannel) != 0) && (image->matte == MagickTrue)) status|=LevelImageChannel(image,OpacityChannel, black_color->opacity,white_color->opacity,(double) 1.0); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) status|=LevelImageChannel(image,IndexChannel, black_color->index,white_color->index,(double) 1.0); } else { if ((channel & RedChannel) != 0) status|=LevelizeImageChannel(image,RedChannel, black_color->red,white_color->red,(double) 1.0); if ((channel & GreenChannel) != 0) status|=LevelizeImageChannel(image,GreenChannel, black_color->green,white_color->green,(double) 1.0); if ((channel & BlueChannel) != 0) status|=LevelizeImageChannel(image,BlueChannel, black_color->blue,white_color->blue,(double) 1.0); if (((channel & OpacityChannel) != 0) && (image->matte == MagickTrue)) status|=LevelizeImageChannel(image,OpacityChannel, black_color->opacity,white_color->opacity,(double) 1.0); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) status|=LevelizeImageChannel(image,IndexChannel, black_color->index,white_color->index,(double) 1.0); } return(status == 0 ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i n e a r S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LinearStretchImage() discards any pixels below the black point and above % the white point and levels the remaining pixels. % % The format of the LinearStretchImage method is: % % MagickBooleanType LinearStretchImage(Image *image, % const double black_point,const double white_point) % % A description of each parameter follows: % % o image: the image. % % o black_point: the black point. % % o white_point: the white point. % */ MagickExport MagickBooleanType LinearStretchImage(Image *image, const double black_point,const double white_point) { #define LinearStretchImageTag "LinearStretch/Image" ExceptionInfo *exception; MagickBooleanType status; MagickRealType *histogram, intensity; ssize_t black, white, y; /* Allocate histogram and linear map. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); histogram=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram)); if (histogram == (MagickRealType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Form histogram. 
*/ (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram)); exception=(&image->exception); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *restrict p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=(ssize_t) image->columns-1; x >= 0; x--) { histogram[ScaleQuantumToMap(PixelIntensityToQuantum(p))]++; p++; } } /* Find the histogram boundaries by locating the black and white point levels. */ intensity=0.0; for (black=0; black < (ssize_t) MaxMap; black++) { intensity+=histogram[black]; if (intensity >= black_point) break; } intensity=0.0; for (white=(ssize_t) MaxMap; white != 0; white--) { intensity+=histogram[white]; if (intensity >= white_point) break; } histogram=(MagickRealType *) RelinquishMagickMemory(histogram); status=LevelImageChannel(image,DefaultChannels,(double) black,(double) white, 1.0); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o d u l a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModulateImage() lets you control the brightness, saturation, and hue % of an image. Modulate represents the brightness, saturation, and hue % as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the % modulation is lightness, saturation, and hue. And if the colorspace is % HWB, use blackness, whiteness, and hue. % % The format of the ModulateImage method is: % % MagickBooleanType ModulateImage(Image *image,const char *modulate) % % A description of each parameter follows: % % o image: the image. % % o modulate: Define the percent change in brightness, saturation, and % hue. % */ static void ModulateHSB(const double percent_hue, const double percent_saturation,const double percent_brightness, Quantum *red,Quantum *green,Quantum *blue) { double brightness, hue, saturation; /* Increase or decrease color brightness, saturation, or hue. */ assert(red != (Quantum *) NULL); assert(green != (Quantum *) NULL); assert(blue != (Quantum *) NULL); ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue > 1.0) hue-=1.0; saturation*=0.01*percent_saturation; brightness*=0.01*percent_brightness; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } static void ModulateHSL(const double percent_hue, const double percent_saturation,const double percent_lightness, Quantum *red,Quantum *green,Quantum *blue) { double hue, lightness, saturation; /* Increase or decrease color lightness, saturation, or hue. */ assert(red != (Quantum *) NULL); assert(green != (Quantum *) NULL); assert(blue != (Quantum *) NULL); ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness); hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue > 1.0) hue-=1.0; saturation*=0.01*percent_saturation; lightness*=0.01*percent_lightness; ConvertHSLToRGB(hue,saturation,lightness,red,green,blue); } static void ModulateHWB(const double percent_hue,const double percent_whiteness, const double percent_blackness,Quantum *red,Quantum *green,Quantum *blue) { double blackness, hue, whiteness; /* Increase or decrease color blackness, whiteness, or hue. 
*/ assert(red != (Quantum *) NULL); assert(green != (Quantum *) NULL); assert(blue != (Quantum *) NULL); ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness); hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue > 1.0) hue-=1.0; blackness*=0.01*percent_blackness; whiteness*=0.01*percent_whiteness; ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue); } MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate) { #define ModulateImageTag "Modulate/Image" CacheView *image_view; ColorspaceType colorspace; const char *artifact; double percent_brightness, percent_hue, percent_saturation; ExceptionInfo *exception; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; register ssize_t i; ssize_t y; /* Initialize modulate table. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (modulate == (char *) NULL) return(MagickFalse); flags=ParseGeometry(modulate,&geometry_info); percent_brightness=geometry_info.rho; percent_saturation=geometry_info.sigma; if ((flags & SigmaValue) == 0) percent_saturation=100.0; percent_hue=geometry_info.xi; if ((flags & XiValue) == 0) percent_hue=100.0; colorspace=UndefinedColorspace; artifact=GetImageArtifact(image,"modulate:colorspace"); if (artifact != (const char *) NULL) colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions, MagickFalse,artifact); if (image->storage_class == PseudoClass) { /* Modulate colormap. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (i=0; i < (ssize_t) image->colors; i++) switch (colorspace) { case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &image->colormap[i].red,&image->colormap[i].green, &image->colormap[i].blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &image->colormap[i].red,&image->colormap[i].green, &image->colormap[i].blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &image->colormap[i].red,&image->colormap[i].green, &image->colormap[i].blue); break; } } } /* Modulate image. 
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum blue, green, red; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=GetPixelRed(q); green=GetPixelGreen(q); blue=GetPixelBlue(q); switch (colorspace) { case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } } SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ModulateImage) #endif proceed=SetImageProgress(image,ModulateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e g a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NegateImage() negates the colors in the reference image. The grayscale % option means that only grayscale values within the image are negated. % % The format of the NegateImageChannel method is: % % MagickBooleanType NegateImage(Image *image, % const MagickBooleanType grayscale) % MagickBooleanType NegateImageChannel(Image *image, % const ChannelType channel,const MagickBooleanType grayscale) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o grayscale: If MagickTrue, only negate grayscale pixels within the image. % */ MagickExport MagickBooleanType NegateImage(Image *image, const MagickBooleanType grayscale) { MagickBooleanType status; status=NegateImageChannel(image,DefaultChannels,grayscale); return(status); } MagickExport MagickBooleanType NegateImageChannel(Image *image, const ChannelType channel,const MagickBooleanType grayscale) { #define NegateImageTag "Negate/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { /* Negate colormap. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (i=0; i < (ssize_t) image->colors; i++) { if (grayscale != MagickFalse) if ((image->colormap[i].red != image->colormap[i].green) || (image->colormap[i].green != image->colormap[i].blue)) continue; if ((channel & RedChannel) != 0) image->colormap[i].red=(Quantum) QuantumRange- image->colormap[i].red; if ((channel & GreenChannel) != 0) image->colormap[i].green=(Quantum) QuantumRange- image->colormap[i].green; if ((channel & BlueChannel) != 0) image->colormap[i].blue=(Quantum) QuantumRange- image->colormap[i].blue; } } /* Negate image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireCacheView(image); if (grayscale != MagickFalse) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((GetPixelRed(q) != GetPixelGreen(q)) || (GetPixelGreen(q) != GetPixelBlue(q))) { q++; continue; } if ((channel & RedChannel) != 0) SetPixelRed(q,QuantumRange-GetPixelRed(q)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,QuantumRange-GetPixelGreen(q)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,QuantumRange-GetPixelBlue(q)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,QuantumRange- GetPixelOpacity(q)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,QuantumRange- GetPixelIndex(indexes+x)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_NegateImageChannel) #endif proceed=SetImageProgress(image,NegateImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(MagickTrue); } /* Negate image. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,QuantumRange-GetPixelRed(q)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,QuantumRange-GetPixelGreen(q)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,QuantumRange-GetPixelBlue(q)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,QuantumRange-GetPixelOpacity(q)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,QuantumRange- GetPixelIndex(indexes+x)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_NegateImageChannel) #endif proceed=SetImageProgress(image,NegateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N o r m a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The NormalizeImage() method enhances the contrast of a color image by % mapping the darkest 2 percent of all pixel to black and the brightest % 1 percent to white. % % The format of the NormalizeImage method is: % % MagickBooleanType NormalizeImage(Image *image) % MagickBooleanType NormalizeImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % */ MagickExport MagickBooleanType NormalizeImage(Image *image) { MagickBooleanType status; status=NormalizeImageChannel(image,DefaultChannels); return(status); } MagickExport MagickBooleanType NormalizeImageChannel(Image *image, const ChannelType channel) { double black_point, white_point; black_point=(double) image->columns*image->rows*0.0015; white_point=(double) image->columns*image->rows*0.9995; return(ContrastStretchImageChannel(image,channel,black_point,white_point)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S i g m o i d a l C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SigmoidalContrastImage() adjusts the contrast of an image with a non-linear % sigmoidal contrast algorithm. Increase the contrast of the image using a % sigmoidal transfer function without saturating highlights or shadows. % Contrast indicates how much to increase the contrast (0 is none; 3 is % typical; 20 is pushing it); mid-point indicates where midtones fall in the % resultant image (0 is white; 50% is middle-gray; 100% is black). Set % sharpen to MagickTrue to increase the image contrast otherwise the contrast % is reduced. 
% % The format of the SigmoidalContrastImage method is: % % MagickBooleanType SigmoidalContrastImage(Image *image, % const MagickBooleanType sharpen,const char *levels) % MagickBooleanType SigmoidalContrastImageChannel(Image *image, % const ChannelType channel,const MagickBooleanType sharpen, % const double contrast,const double midpoint) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o sharpen: Increase or decrease image contrast. % % o alpha: strength of the contrast, the larger the number the more % 'threshold-like' it becomes. % % o beta: midpoint of the function as a color value 0 to QuantumRange. % */ MagickExport MagickBooleanType SigmoidalContrastImage(Image *image, const MagickBooleanType sharpen,const char *levels) { GeometryInfo geometry_info; MagickBooleanType status; MagickStatusType flags; flags=ParseGeometry(levels,&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0*QuantumRange/2.0; if ((flags & PercentValue) != 0) geometry_info.sigma=1.0*QuantumRange*geometry_info.sigma/100.0; status=SigmoidalContrastImageChannel(image,DefaultChannels,sharpen, geometry_info.rho,geometry_info.sigma); return(status); } MagickExport MagickBooleanType SigmoidalContrastImageChannel(Image *image, const ChannelType channel,const MagickBooleanType sharpen, const double contrast,const double midpoint) { #define SigmoidalContrastImageTag "SigmoidalContrast/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickRealType *sigmoidal_map; register ssize_t i; ssize_t y; /* Allocate and initialize sigmoidal maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); sigmoidal_map=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*sigmoidal_map)); if (sigmoidal_map == (MagickRealType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) ResetMagickMemory(sigmoidal_map,0,(MaxMap+1)*sizeof(*sigmoidal_map)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { if (sharpen != MagickFalse) { sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType) (MaxMap*((1.0/(1.0+exp(contrast*(midpoint/(double) QuantumRange- (double) i/MaxMap))))-(1.0/(1.0+exp(contrast*(midpoint/ (double) QuantumRange)))))/((1.0/(1.0+exp(contrast*(midpoint/ (double) QuantumRange-1.0))))-(1.0/(1.0+exp(contrast*(midpoint/ (double) QuantumRange)))))+0.5)); continue; } sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType) (MaxMap*(QuantumScale*midpoint-log((1.0-(1.0/(1.0+exp(midpoint/ (double) QuantumRange*contrast))+((double) i/MaxMap)*((1.0/ (1.0+exp(contrast*(midpoint/(double) QuantumRange-1.0))))-(1.0/ (1.0+exp(midpoint/(double) QuantumRange*contrast))))))/ (1.0/(1.0+exp(midpoint/(double) QuantumRange*contrast))+ ((double) i/MaxMap)*((1.0/(1.0+exp(contrast*(midpoint/ (double) QuantumRange-1.0))))-(1.0/(1.0+exp(midpoint/ (double) QuantumRange*contrast))))))/contrast))); } if (image->storage_class == PseudoClass) { /* Sigmoidal-contrast enhance colormap. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) image->colormap[i].red=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].red)]); if ((channel & GreenChannel) != 0) image->colormap[i].green=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].green)]); if ((channel & BlueChannel) != 0) image->colormap[i].blue=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].blue)]); if ((channel & OpacityChannel) != 0) image->colormap[i].opacity=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].opacity)]); } } /* Sigmoidal-contrast enhance image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelRed(q))])); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelGreen(q))])); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelBlue(q))])); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(GetPixelOpacity(q))])); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(GetPixelIndex(indexes+x))])); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SigmoidalContrastImageChannel) #endif proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); sigmoidal_map=(MagickRealType *) RelinquishMagickMemory(sigmoidal_map); return(status); }
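/*
  A hedged usage sketch, not part of the original MagickCore source above: it
  shows how the channel-based enhancement entry points defined in this file
  (GammaImageChannel, SigmoidalContrastImageChannel, ModulateImage) might be
  driven from a small standalone program.  It assumes an ImageMagick 6.x
  MagickCore installation; the header path, the file names "input.png" and
  "output.png", and the parameter values are illustrative assumptions, and
  error handling is deliberately minimal.
*/
#include <stdio.h>
#include <magick/MagickCore.h>

int main(void)
{
  ExceptionInfo
    *exception;

  Image
    *image;

  ImageInfo
    *image_info;

  MagickCoreGenesis("enhance-example",MagickTrue);
  exception=AcquireExceptionInfo();
  image_info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename,"input.png",MaxTextExtent);
  image=ReadImage(image_info,exception);
  if (image != (Image *) NULL)
    {
      /*
        Gamma-correct the default channels, add sigmoidal contrast centered at
        mid-intensity, then adjust brightness/saturation/hue as percentages.
      */
      (void) GammaImageChannel(image,DefaultChannels,1.6);
      (void) SigmoidalContrastImageChannel(image,DefaultChannels,MagickTrue,
        3.0,(double) QuantumRange/2.0);
      (void) ModulateImage(image,"110,120,100");
      (void) CopyMagickString(image->filename,"output.png",MaxTextExtent);
      (void) WriteImage(image_info,image);
      image=DestroyImage(image);
    }
  else
    fprintf(stderr,"unable to read input.png\n");
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}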
9600.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "3mm.h" /* Array initialization. */ static void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nk; j++) A[i][j] = ((DATA_TYPE) i*j) / ni; for (i = 0; i < nk; i++) for (j = 0; j < nj; j++) B[i][j] = ((DATA_TYPE) i*(j+1)) / nj; for (i = 0; i < nj; i++) for (j = 0; j < nm; j++) C[i][j] = ((DATA_TYPE) i*(j+3)) / nl; for (i = 0; i < nm; i++) for (j = 0; j < nl; j++) D[i][j] = ((DATA_TYPE) i*(j+2)) / nk; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nl, DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nl; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl), DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j, k; #pragma scop #pragma omp parallel private (j, k) num_threads(1) { /* E := A*B */ #pragma omp for schedule(dynamic, 1) for (i = 0; i < _PB_NI; i++) for (j = 0; j < _PB_NJ; j++) { E[i][j] = 0; for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } /* F := C*D */ #pragma omp for schedule(dynamic, 1) for (i = 0; i < _PB_NJ; i++) for (j = 0; j < _PB_NL; j++) { F[i][j] = 0; for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } /* G := E*F */ #pragma omp for schedule(dynamic, 1) for (i = 0; i < _PB_NI; i++) for (j = 0; j < _PB_NL; j++) { G[i][j] = 0; for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_3mm (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. 
   */
  polybench_stop_instruments;
  polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(E);
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  POLYBENCH_FREE_ARRAY(F);
  POLYBENCH_FREE_ARRAY(C);
  POLYBENCH_FREE_ARRAY(D);
  POLYBENCH_FREE_ARRAY(G);

  return 0;
}
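/*
  A hedged, self-contained illustration (not part of the PolyBench sources):
  it isolates the parallelization pattern used in kernel_3mm above, where a
  single "omp parallel" region workshares each matrix-product loop nest over
  the outer index with "omp for" while the inner indices j and k remain
  private to every thread.  The 64x64 problem size and the dynamic chunk of 1
  are arbitrary choices for illustration; compile it as its own translation
  unit with an OpenMP-enabled compiler (e.g. -fopenmp).
*/
#include <stdio.h>
#include <omp.h>

#define N 64

static double A[N][N], B[N][N], E[N][N];

int main(void)
{
  int i, j, k;

  for (i = 0; i < N; i++)
    for (j = 0; j < N; j++) {
      A[i][j] = ((double) i * j) / N;
      B[i][j] = ((double) i * (j + 1)) / N;
    }

#pragma omp parallel private(j, k)
  {
    /* Only i is workshared; each thread keeps its own j and k counters,
       so rows of E are computed independently and race-free. */
#pragma omp for schedule(dynamic, 1)
    for (i = 0; i < N; i++)
      for (j = 0; j < N; j++) {
        E[i][j] = 0.0;
        for (k = 0; k < N; k++)
          E[i][j] += A[i][k] * B[k][j];
      }
  }

  printf("E[0][0] = %f, E[%d][%d] = %f\n", E[0][0], N - 1, N - 1,
         E[N - 1][N - 1]);
  return 0;
}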
DRB063-outeronly1-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Only the outermost loop can be parallelized. */ #include <omp.h> #include <stdio.h> int n = 100; int m = 100; double b[100][100]; int init() { int i; int j; int k; #pragma omp parallel for private (i,j) firstprivate (n,m) for (i = 0; i <= n - 1; i += 1) { #pragma omp parallel for private (j) for (j = 0; j <= m - 1; j += 1) { b[i][j] = (i * j); } } return 0; } void foo() { int i; int j; #pragma omp parallel for private (i,j) firstprivate (n,m) for (i = 0; i <= n - 1; i += 1) { /* Be careful about bounds of j */ for (j = 0; j <= m - 1 - 1; j += 1) { b[i][j] = b[i][j + 1]; } } } int print() { int i; int j; int k; for (i = 0; i <= n - 1; i += 1) { for (j = 0; j <= m - 1; j += 1) { printf("%lf\n",b[i][j]); } } return 0; } int main() { init(); foo(); print(); return 0; }
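/*
  A hedged, self-contained illustration (not part of the DataRaceBench
  sources): it isolates the pattern used in foo() above.  The inner j loop
  carries a dependence -- iteration j writes a[i][j] while iteration j-1 must
  still read the original a[i][j] -- so it has to stay serial inside each row;
  only the outer i loop, whose iterations touch disjoint rows, is safe to
  parallelize.  The 8x8 size is an arbitrary choice for illustration; compile
  it as its own translation unit with an OpenMP-enabled compiler.
*/
#include <stdio.h>
#include <omp.h>

#define ROWS 8
#define COLS 8

static double a[ROWS][COLS];

int main(void)
{
  int i, j;

  for (i = 0; i < ROWS; i++)
    for (j = 0; j < COLS; j++)
      a[i][j] = (double) (i * COLS + j);

  /* Rows are independent, so parallelizing the i loop introduces no race. */
#pragma omp parallel for private(j)
  for (i = 0; i < ROWS; i++)
    /* The j loop shifts each row left by one; it must run sequentially so
       every read of a[i][j + 1] still sees the original value. */
    for (j = 0; j < COLS - 1; j++)
      a[i][j] = a[i][j + 1];

  for (i = 0; i < ROWS; i++)
    printf("row %d now starts at %.1f\n", i, a[i][0]);
  return 0;
}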
ast-dump-openmp-target-teams-distribute-simd.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp target teams distribute simd for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp target teams distribute simd for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp target teams distribute simd collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp target teams distribute simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp target teams distribute simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-teams-distribute-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPTargetTeamsDistributeSimdDirective {{.*}} <line:4:1, col:41> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:4:1) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPTargetTeamsDistributeSimdDirective {{.*}} <line:10:1, col:41> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} 
<line:13:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 
'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // 
CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, 
col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPTargetTeamsDistributeSimdDirective {{.*}} <line:17:1, col:53> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:42, col:52> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:51> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:51> 'int' 1 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr 
{{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> 
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPTargetTeamsDistributeSimdDirective {{.*}} <line:24:1, col:53> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:42, col:52> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:51> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:51> 'int' 2 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | 
`-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | 
`-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPTargetTeamsDistributeSimdDirective {{.*}} <line:31:1, col:53> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:42, col:52> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:51> 'int' // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:51> 'int' 2 // CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | 
`-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | 
|-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // 
CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> 
openmp_structured_block // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
softplus_ref.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: qtang@openailab.com */ #include "graph/tensor.h" #include "graph/node.h" #include "graph/graph.h" #include "module/module.h" #include "operator/op.h" #include "device/cpu/cpu_node.h" #include "device/cpu/cpu_graph.h" #include "device/cpu/cpu_module.h" #include "utility/float.h" #include "utility/sys_port.h" #include "utility/log.h" #include <math.h> #if defined(__APPLE__) || defined(_MSC_VER) #include <stdio.h> #endif int ref_softplus_fp32(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread) { int w = input_tensor->dims[3]; int h = output_tensor->dims[2]; int channels = input_tensor->dims[1]; int size = h * w; int c_step = h * w; float* input_data = (float*)input_tensor->data; float* out_data = (float*)output_tensor->data; #pragma omp parallel for num_threads(num_thread) for (int q = 0; q < channels; q++) { float* src = input_data + c_step * q; float* dst = out_data + c_step * q; for (int i = 0; i < size; i++) { dst[i] = log(exp(src[i]) + 1.0f); } } return 0; } static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct node* ir_node = exec_node->ir_node; struct graph* ir_graph = ir_node->graph; struct tensor* input_tensor; struct tensor* output_tensor; input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); int ret = -1; if(input_tensor->data_type == TENGINE_DT_FP32) ret = ref_softplus_fp32(input_tensor, output_tensor, exec_graph->num_thread); else printf("Input data type %d not to be supported.\n", input_tensor->data_type); return ret; } static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct node* node = exec_node->ir_node; struct graph* ir_graph = node->graph; struct tensor* input = get_ir_graph_tensor(ir_graph, node->input_tensors[0]); struct tensor* output = get_ir_graph_tensor(ir_graph, node->output_tensors[0]); int ret = set_ir_tensor_shape(output, input->dims, input->dim_num); return ret; } static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node) { return OPS_SCORE_CANDO; } static struct node_ops hcl_node_ops = { .prerun = NULL, .run = run, .reshape = reshape, .postrun = NULL, .init_node = init_node, .release_node = release_node, .score = score }; int register_softplus_ref_op() { return register_builtin_node_ops(OP_SOFTPLUS, &hcl_node_ops); } int 
unregister_softplus_ref_op() { return unregister_builtin_node_ops(OP_SOFTPLUS, &hcl_node_ops); }
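/*
 * Sketch (not part of the Tengine source above; the helper name is illustrative):
 * ref_softplus_fp32() computes softplus directly as log(exp(x) + 1) using the
 * double-precision math functions, which overflows exp() for large positive
 * inputs. A numerically stable single-precision variant uses the identity
 * log(1 + e^x) = max(x, 0) + log(1 + e^-|x|).
 */
#include <math.h>

static inline float softplus_stable_fp32(float x)
{
    /* expf(-|x|) is always <= 1, so nothing here can overflow. */
    return fmaxf(x, 0.0f) + log1pf(expf(-fabsf(x)));
}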
double_ptr.c
#include <stdlib.h>

/* Fill a dim1 x dim2 array, accessed through a pointer-to-pointer, in
 * parallel; the outer loop is distributed across threads, and the inner
 * loop variable j is implicitly private (OpenMP's sequential-loop rule). */
void kernel(int dim1, int dim2, int** arr)
{
    int i = 0, j = 0;
#pragma omp parallel for
    for(i = 0; i < dim1; i++)
        for(j = 0; j < dim2; j++)
            arr[i][j] = i + j;
}

int main(int argc, char** argv)
{
    int dim1 = 10, dim2 = 10, i = 0;
    /* Row-pointer table backed by one contiguous allocation. */
    int** arr = (int**)malloc(sizeof(int*) * dim1);
    int* arr_hidden = (int*)malloc(sizeof(int) * dim1 * dim2);
    for(i = 0; i < dim1; i++)
        arr[i] = &arr_hidden[dim2 * i];
    kernel(dim1, dim2, arr);
    free(arr_hidden);
    free(arr);
    return 0;
}
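/*
 * Sketch (not in double_ptr.c above; kernel_collapsed is an illustrative name):
 * because the two loops in kernel() are perfectly nested and their iterations
 * are independent, the same update can also be written with a collapse(2)
 * clause so the whole dim1 * dim2 iteration space is distributed across
 * threads. Requires OpenMP 3.0 or later.
 */
void kernel_collapsed(int dim1, int dim2, int** arr)
{
#pragma omp parallel for collapse(2)
    for (int i = 0; i < dim1; i++)
        for (int j = 0; j < dim2; j++)
            arr[i][j] = i + j;
}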
image.c
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* Copy a crop_w x crop_h window (per channel) out of an in_width x in_height
 * image stored channel-major; pixels outside the input are zero-filled.
 * Each output pixel is independent, so the flat loop parallelizes directly. */
void neuralops_omp_image_crop(
    size_t in_width,
    size_t in_height,
    size_t chan,
    size_t crop_w,
    size_t crop_h,
    ptrdiff_t offset_x,
    ptrdiff_t offset_y,
    const float *in_pixels,
    float *out_pixels)
{
  size_t p_limit = crop_w * crop_h * chan;
  #pragma omp parallel for
  for (size_t p = 0; p < p_limit; p++) {
    size_t u = p % crop_w;
    size_t v = (p / crop_w) % crop_h;
    size_t a = p / (crop_w * crop_h);
    ptrdiff_t x = offset_x + u;
    ptrdiff_t y = offset_y + v;
    if (x < 0 || x >= in_width || y < 0 || y >= in_height) {
      out_pixels[p] = 0.0f;
    } else {
      out_pixels[p] = in_pixels[x + in_width * (y + in_height * a)];
    }
  }
}

/* Mirror the image horizontally, channel by channel. */
void neuralops_omp_image_flip(
    size_t width,
    size_t height,
    size_t chan,
    const float *in_pixels,
    float *out_pixels)
{
  size_t p_limit = width * height * chan;
  #pragma omp parallel for
  for (size_t p = 0; p < p_limit; p++) {
    size_t x = p % width;
    size_t y = (p / width) % height;
    size_t a = p / (width * height);
    out_pixels[p] = in_pixels[(width - x - 1) + width * (y + height * a)];
  }
}
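/*
 * Sketch (not part of image.c above; crop_example and its dimensions are
 * illustrative only): a minimal driver for neuralops_omp_image_crop().
 * Buffer sizes follow the channel-major ([chan][height][width]) indexing
 * used by the kernel.
 */
int crop_example(void)
{
    size_t w = 8, h = 6, c = 3, crop_w = 4, crop_h = 4;
    float *in  = calloc(w * h * c, sizeof(float));
    float *out = malloc(crop_w * crop_h * c * sizeof(float));
    if (!in || !out) { free(in); free(out); return -1; }

    /* Crop a 4x4 window whose top-left corner is at (2, 1); pixels that
     * fall outside the input are written as 0. */
    neuralops_omp_image_crop(w, h, c, crop_w, crop_h, 2, 1, in, out);

    free(in);
    free(out);
    return 0;
}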
omp_for_lastprivate.c
// RUN: %libomp-compile-and-run
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"

int sum0;
#pragma omp threadprivate(sum0)

int test_omp_for_lastprivate()
{
  int sum = 0;
  int known_sum;
  int i0;

  i0 = -1;

  #pragma omp parallel
  {
    sum0 = 0;
    { /* Begin of orphaned block */
      int i;
      #pragma omp for schedule(static,7) lastprivate(i0)
      for (i = 1; i <= LOOPCOUNT; i++) {
        sum0 = sum0 + i;
        i0 = i;
      } /* end of for */
    } /* end of orphaned block */

    #pragma omp critical
    {
      sum = sum + sum0;
    } /* end of critical */
  } /* end of parallel */

  known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
  fprintf(stderr, "known_sum = %d , sum = %d\n", known_sum, sum);
  fprintf(stderr, "LOOPCOUNT = %d , i0 = %d\n", LOOPCOUNT, i0);
  return ((known_sum == sum) && (i0 == LOOPCOUNT));
}

int main()
{
  int i;
  int num_failed = 0;

  for (i = 0; i < REPETITIONS; i++) {
    if (!test_omp_for_lastprivate()) {
      num_failed++;
    }
  }
  return num_failed;
}
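/*
 * Sketch (not part of the libomp test above; sum_with_reduction is an
 * illustrative helper): the threadprivate sum0 plus critical-section
 * accumulation exercised by the test can also be expressed with a reduction
 * clause. Shown only for comparison; the test deliberately uses the orphaned
 * worksharing form.
 */
static int sum_with_reduction(int loopcount)
{
    int sum = 0;
#pragma omp parallel for schedule(static, 7) reduction(+ : sum)
    for (int i = 1; i <= loopcount; i++)
        sum += i;
    return sum;
}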
dlag2s.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zlag2c.c, mixed zc -> ds, Fri Sep 28 17:38:17 2018 * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_tuning.h" #include "plasma_types.h" /***************************************************************************//** * * @ingroup plasma_lag2 * * Converts m-by-n matrix A from complex double to complex single precision. * ******************************************************************************* * * @param[in] m * The number of rows of the matrix A. m >= 0. * * @param[in] n * The number of columns of the matrix A. n >= 0. * * @param[in] pA * The lda-by-n matrix A in double complex precision. * * @param[in] lda * The leading dimension of the array A. lda >= max(1,m). * * @param[out] pAs * On exit, the ldas-by-n matrix As in single complex precision. * * @param[in] ldas * The leading dimension of the array As. ldas >= max(1,m). * ******************************************************************************* * * @retval PlasmaSuccess successful exit * ******************************************************************************* * * @sa plasma_omp_dlag2s * @sa plasma_slag2d * @sa plasma_dlag2s * @sa plasma_slag2d * ******************************************************************************/ int plasma_dlag2s(int m, int n, double *pA, int lda, float *pAs, int ldas) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. if (m < 0) { plasma_error("illegal value of m"); return -1; } if (n < 0) { plasma_error("illegal value of n"); return -2; } if (lda < imax(1, m)) { plasma_error("illegal value of lda"); return -4; } if (ldas < imax(1, m)) { plasma_error("illegal value of ldas"); return -6; } // quick return if (imin(n, m) == 0) return PlasmaSuccess; // Tune parameters. if (plasma->tuning) plasma_tune_lag2c(plasma, PlasmaRealDouble, m, n); // Set tiling parameters. int nb = plasma->nb; // Create tile matrices. plasma_desc_t A; plasma_desc_t As; int retval; retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb, m, n, 0, 0, m, n, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb, m, n, 0, 0, m, n, &As); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); return retval; } // Initialize sequence. plasma_sequence_t sequence; retval = plasma_sequence_init(&sequence); // Initialize request. plasma_request_t request; retval = plasma_request_init(&request); // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_dge2desc(pA, lda, A, &sequence, &request); plasma_omp_sge2desc(pAs, ldas, As, &sequence, &request); // Call tile async function. plasma_omp_dlag2s(A, As, &sequence, &request); // Translate back to LAPACK layout. plasma_omp_ddesc2ge(A, pA, lda, &sequence, &request); plasma_omp_sdesc2ge(As, pAs, ldas, &sequence, &request); } // implicit synchronization // Free matrices in tile layout. plasma_desc_destroy(&A); plasma_desc_destroy(&As); // Return status. 
int status = sequence.status; return status; } /***************************************************************************//** * * @ingroup plasma_lag2 * * Converts m-by-n matrix A from double complex to single complex precision. * Non-blocking tile version of plasma_dlag2s(). May return before the * computation is finished. Operates on matrices stored by tiles. All matrices * are passed through descriptors. All dimensions are taken from the * descriptors. Allows for pipelining of operations at runtime. * ******************************************************************************* * * @param[in] A * Descriptor of matrix A. * * @param[out] As * Descriptor of matrix As. * * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). Check the * sequence->status for errors. * * @param[out] request * Identifies this function call (for exception handling purposes). * * @retval void * Errors are returned by setting sequence->status and * request->status to error values. The sequence->status and * request->status should never be set to PlasmaSuccess (the * initial values) since another async call may be setting a * failure value at the same time. * ******************************************************************************* * * @sa plasma_dlag2s * @sa plasma_omp_slag2d * @sa plasma_omp_dlag2s * @sa plasma_omp_slag2d * ******************************************************************************/ void plasma_omp_dlag2s(plasma_desc_t A, plasma_desc_t As, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments. if (plasma_desc_check(A) != PlasmaSuccess) { plasma_error("invalid A"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(As) != PlasmaSuccess) { plasma_error("invalid As"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return if (imin(A.m, A.n) == 0) return; // Call the parallel function. plasma_pdlag2s(A, As, sequence, request); }
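/*
 * Sketch (not part of PLASMA above; submit_tile_work and async_block_pattern
 * are hypothetical placeholders): the shape of the "asynchronous block" used
 * in plasma_dlag2s(). Only the master thread submits work (the plasma_omp_*
 * calls create OpenMP tasks internally); the rest of the team executes those
 * tasks, and the implicit barrier at the end of the parallel region is the
 * synchronization point.
 */
static void submit_tile_work(void)
{
    /* e.g. #pragma omp task ... for each tile */
}

static void async_block_pattern(void)
{
    #pragma omp parallel
    #pragma omp master
    {
        submit_tile_work();   /* only the master submits */
    }
    /* implicit synchronization: all generated tasks have completed here */
}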
pngquant.c
/* pngquant.c - quantize the colors in an alphamap down to a specified number ** ** Copyright (C) 1989, 1991 by Jef Poskanzer. ** ** Permission to use, copy, modify, and distribute this software and its ** documentation for any purpose and without fee is hereby granted, provided ** that the above copyright notice appear in all copies and that both that ** copyright notice and this permission notice appear in supporting ** documentation. This software is provided "as is" without express or ** implied warranty. ** ** - - - - ** ** © 1997-2002 by Greg Roelofs; based on an idea by Stefan Schneider. ** © 2009-2014 by Kornel Lesiński. ** ** All rights reserved. ** ** Redistribution and use in source and binary forms, with or without modification, ** are permitted provided that the following conditions are met: ** ** 1. Redistributions of source code must retain the above copyright notice, ** this list of conditions and the following disclaimer. ** ** 2. Redistributions in binary form must reproduce the above copyright notice, ** this list of conditions and the following disclaimer in the documentation ** and/or other materials provided with the distribution. ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ** AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ** IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ** DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE ** FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL ** DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ** SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ** CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, ** OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ** OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ** */ #define PNGQUANT_VERSION LIQ_VERSION_STRING " (June 2015)" #define PNGQUANT_USAGE "\ usage: pngquant [options] [ncolors] -- pngfile [pngfile ...]\n\ pngquant [options] [ncolors] - >stdout <stdin\n\n\ options:\n\ --force overwrite existing output files (synonym: -f)\n\ --skip-if-larger only save converted files if they're smaller than original\n\ --output file output path, only if one input file is specified (synonym: -o)\n\ --ext new.png set custom suffix/extension for output filenames\n\ --quality min-max don't save below min, use fewer colors below max (0-100)\n\ --speed N speed/quality trade-off. 1=slow, 3=default, 11=fast & rough\n\ --nofs disable Floyd-Steinberg dithering\n\ --posterize N output lower resolution color (e.g. for ARGB4444 output)\n\ --verbose print status messages (synonym: -v)\n\ \n\ Quantizes one or more 32-bit RGBA PNGs to 8-bit (or smaller) RGBA-palette\n\ PNGs using Floyd-Steinberg diffusion dithering (unless disabled).\n\ The output filename is the same as the input name except that\n\ it ends in \"-fs8.png\", \"-or8.png\" or your custom extension (unless the\n\ input is stdin, in which case the quantized image will go to stdout).\n\ The default behavior if the output file exists is to skip the conversion;\n\ use --force to overwrite. 
See man page for full list of options.\n" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <stdbool.h> #include <getopt.h> #include <unistd.h> extern char *optarg; extern int optind, opterr; #if defined(WIN32) || defined(__WIN32__) # include <fcntl.h> /* O_BINARY */ # include <io.h> /* setmode() */ #endif #ifdef _OPENMP #include <omp.h> #else #define omp_get_max_threads() 1 #define omp_get_thread_num() 0 #endif #include "rwpng.h" /* typedefs, common macros, public prototypes */ #include "lib/libimagequant.h" struct pngquant_options { liq_attr *liq; liq_image *fixed_palette_image; liq_log_callback_function *log_callback; void *log_callback_user_info; float floyd; bool using_stdin, using_stdout, force, fast_compression, ie_mode, min_quality_limit, skip_if_larger, verbose; }; static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, png8_image *output_image); static void set_palette(liq_result *result, png8_image *output_image); static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool verbose); static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options); static char *add_filename_extension(const char *filename, const char *newext); static bool file_exists(const char *outname); static void verbose_printf(struct pngquant_options *context, const char *fmt, ...) { if (context->log_callback) { va_list va; va_start(va, fmt); int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0 va_end(va); char buf[required_space]; va_start(va, fmt); vsnprintf(buf, required_space, fmt, va); va_end(va); context->log_callback(context->liq, buf, context->log_callback_user_info); } } static void log_callback(const liq_attr *attr, const char *msg, void* user_info) { fprintf(stderr, "%s\n", msg); } #ifdef _OPENMP #define LOG_BUFFER_SIZE 1300 struct buffered_log { int buf_used; char buf[LOG_BUFFER_SIZE]; }; static void log_callback_buferred_flush(const liq_attr *attr, void *context) { struct buffered_log *log = context; if (log->buf_used) { fwrite(log->buf, 1, log->buf_used, stderr); fflush(stderr); log->buf_used = 0; } } static void log_callback_buferred(const liq_attr *attr, const char *msg, void* context) { struct buffered_log *log = context; int len = strlen(msg); if (len > LOG_BUFFER_SIZE-2) len = LOG_BUFFER_SIZE-2; if (len > LOG_BUFFER_SIZE - log->buf_used - 2) log_callback_buferred_flush(attr, log); memcpy(&log->buf[log->buf_used], msg, len); log->buf_used += len+1; log->buf[log->buf_used-1] = '\n'; log->buf[log->buf_used] = '\0'; } #endif static void print_full_version(FILE *fd) { fprintf(fd, "pngquant, %s, by Greg Roelofs, Kornel Lesinski.\n" #ifndef NDEBUG " DEBUG (slow) version.\n" /* NDEBUG disables assert() */ #endif #if USE_SSE " Compiled with SSE instructions.\n" #endif #if _OPENMP " Compiled with OpenMP (multicore support).\n" #endif , PNGQUANT_VERSION); rwpng_version_info(fd); fputs("\n", fd); } static void print_usage(FILE *fd) { fputs(PNGQUANT_USAGE, fd); } /** * N = automatic quality, uses limit unless force is set (N-N or 0-N) * -N = no better than N (same as 0-N) * N-M = no worse than N, no better than M * N- = no worse than N, perfect if possible (same as N-100) * * where N,M are numbers between 0 (lousy) and 100 (perfect) */ static bool parse_quality(const char *quality, liq_attr *options, bool *min_quality_limit) { long limit, 
target; const char *str = quality; char *end; long t1 = strtol(str, &end, 10); if (str == end) return false; str = end; if ('\0' == end[0] && t1 < 0) { // quality="-%d" target = -t1; limit = 0; } else if ('\0' == end[0]) { // quality="%d" target = t1; limit = t1*9/10; } else if ('-' == end[0] && '\0' == end[1]) { // quality="%d-" target = 100; limit = t1; } else { // quality="%d-%d" long t2 = strtol(str, &end, 10); if (str == end || t2 > 0) return false; target = -t2; limit = t1; } *min_quality_limit = (limit > 0); return LIQ_OK == liq_set_quality(options, limit, target); } static const struct {const char *old; const char *newopt;} obsolete_options[] = { {"-fs","--floyd=1"}, {"-nofs", "--ordered"}, {"-floyd", "--floyd=1"}, {"-nofloyd", "--ordered"}, {"-ordered", "--ordered"}, {"-force", "--force"}, {"-noforce", "--no-force"}, {"-verbose", "--verbose"}, {"-quiet", "--quiet"}, {"-noverbose", "--quiet"}, {"-noquiet", "--verbose"}, {"-help", "--help"}, {"-version", "--version"}, {"-ext", "--ext"}, {"-speed", "--speed"}, }; static void fix_obsolete_options(const unsigned int argc, char *argv[]) { for(unsigned int argn=1; argn < argc; argn++) { if ('-' != argv[argn][0]) continue; if ('-' == argv[argn][1]) break; // stop on first --option or -- for(unsigned int i=0; i < sizeof(obsolete_options)/sizeof(obsolete_options[0]); i++) { if (0 == strcmp(obsolete_options[i].old, argv[argn])) { fprintf(stderr, " warning: option '%s' has been replaced with '%s'.\n", obsolete_options[i].old, obsolete_options[i].newopt); argv[argn] = (char*)obsolete_options[i].newopt; } } } } enum {arg_floyd=1, arg_ordered, arg_ext, arg_no_force, arg_iebug, arg_transbug, arg_map, arg_posterize, arg_skip_larger}; static const struct option long_options[] = { {"verbose", no_argument, NULL, 'v'}, {"quiet", no_argument, NULL, 'q'}, {"force", no_argument, NULL, 'f'}, {"no-force", no_argument, NULL, arg_no_force}, {"floyd", optional_argument, NULL, arg_floyd}, {"ordered", no_argument, NULL, arg_ordered}, {"nofs", no_argument, NULL, arg_ordered}, {"iebug", no_argument, NULL, arg_iebug}, {"transbug", no_argument, NULL, arg_transbug}, {"ext", required_argument, NULL, arg_ext}, {"skip-if-larger", no_argument, NULL, arg_skip_larger}, {"output", required_argument, NULL, 'o'}, {"speed", required_argument, NULL, 's'}, {"quality", required_argument, NULL, 'Q'}, {"posterize", required_argument, NULL, arg_posterize}, {"map", required_argument, NULL, arg_map}, {"version", no_argument, NULL, 'V'}, {"help", no_argument, NULL, 'h'}, {NULL, 0, NULL, 0}, }; pngquant_error pngquant_file(const char *filename, const char *outname, struct pngquant_options *options); int main(int argc, char *argv[]) { struct pngquant_options options = { .floyd = 1.f, // floyd-steinberg dithering }; options.liq = liq_attr_create(); if (!options.liq) { fputs("SSE-capable CPU is required for this build.\n", stderr); return WRONG_ARCHITECTURE; } unsigned int error_count=0, skipped_count=0, file_count=0; pngquant_error latest_error=SUCCESS; const char *newext = NULL, *output_file_path = NULL; fix_obsolete_options(argc, argv); int opt; do { opt = getopt_long(argc, argv, "Vvqfhs:Q:o:", long_options, NULL); switch (opt) { case 'v': options.verbose = true; break; case 'q': options.verbose = false; break; case arg_floyd: options.floyd = optarg ? 
atof(optarg) : 1.0; if (options.floyd < 0 || options.floyd > 1.f) { fputs("--floyd argument must be in 0..1 range\n", stderr); return INVALID_ARGUMENT; } break; case arg_ordered: options.floyd = 0; break; case 'f': options.force = true; break; case arg_no_force: options.force = false; break; case arg_ext: newext = optarg; break; case 'o': if (output_file_path) { fputs("--output option can be used only once\n", stderr); return INVALID_ARGUMENT; } output_file_path = optarg; break; case arg_iebug: // opacities above 238 will be rounded up to 255, because IE6 truncates <255 to 0. liq_set_min_opacity(options.liq, 238); options.ie_mode = true; break; case arg_transbug: liq_set_last_index_transparent(options.liq, true); break; case arg_skip_larger: options.skip_if_larger = true; break; case 's': { int speed = atoi(optarg); if (speed >= 10) { options.fast_compression = true; } if (speed == 11) { options.floyd = 0; speed = 10; } if (LIQ_OK != liq_set_speed(options.liq, speed)) { fputs("Speed should be between 1 (slow) and 11 (fast).\n", stderr); return INVALID_ARGUMENT; } } break; case 'Q': if (!parse_quality(optarg, options.liq, &options.min_quality_limit)) { fputs("Quality should be in format min-max where min and max are numbers in range 0-100.\n", stderr); return INVALID_ARGUMENT; } break; case arg_posterize: if (LIQ_OK != liq_set_min_posterization(options.liq, atoi(optarg))) { fputs("Posterization should be number of bits in range 0-4.\n", stderr); return INVALID_ARGUMENT; } break; case arg_map: { png24_image tmp = {}; if (SUCCESS != read_image(options.liq, optarg, false, &tmp, &options.fixed_palette_image, false, false)) { fprintf(stderr, " error: unable to load %s", optarg); return INVALID_ARGUMENT; } } break; case 'h': print_full_version(stdout); print_usage(stdout); return SUCCESS; case 'V': puts(PNGQUANT_VERSION); return SUCCESS; case -1: break; default: return INVALID_ARGUMENT; } } while (opt != -1); int argn = optind; if (argn >= argc) { if (argn > 1) { fputs("No input files specified.\n", stderr); } else { print_full_version(stderr); } print_usage(stderr); return MISSING_ARGUMENT; } if (options.verbose) { liq_set_log_callback(options.liq, log_callback, NULL); options.log_callback = log_callback; } char *colors_end; unsigned long colors = strtoul(argv[argn], &colors_end, 10); if (colors_end != argv[argn] && '\0' == colors_end[0]) { if (LIQ_OK != liq_set_max_colors(options.liq, colors)) { fputs("Number of colors must be between 2 and 256.\n", stderr); return INVALID_ARGUMENT; } argn++; } if (newext && output_file_path) { fputs("--ext and --output options can't be used at the same time\n", stderr); return INVALID_ARGUMENT; } // new filename extension depends on options used. Typically basename-fs8.png if (newext == NULL) { newext = options.floyd > 0 ? 
"-ie-fs8.png" : "-ie-or8.png"; if (!options.ie_mode) { newext += 3; /* skip "-ie" */ } } if (argn == argc || (argn == argc-1 && 0==strcmp(argv[argn],"-"))) { options.using_stdin = true; options.using_stdout = !output_file_path; argn = argc-1; } const int num_files = argc-argn; if (output_file_path && num_files != 1) { fputs("Only one input file is allowed when --output is used\n", stderr); return INVALID_ARGUMENT; } #ifdef _OPENMP // if there's a lot of files, coarse parallelism can be used if (num_files > 2*omp_get_max_threads()) { omp_set_nested(0); omp_set_dynamic(1); } else { omp_set_nested(1); } #endif #pragma omp parallel for \ schedule(static, 1) reduction(+:skipped_count) reduction(+:error_count) reduction(+:file_count) shared(latest_error) for(int i=0; i < num_files; i++) { struct pngquant_options opts = options; opts.liq = liq_attr_copy(options.liq); const char *filename = opts.using_stdin ? "stdin" : argv[argn+i]; #ifdef _OPENMP struct buffered_log buf = {}; if (opts.log_callback && omp_get_num_threads() > 1 && num_files > 1) { liq_set_log_callback(opts.liq, log_callback_buferred, &buf); liq_set_log_flush_callback(opts.liq, log_callback_buferred_flush, &buf); options.log_callback = log_callback_buferred; options.log_callback_user_info = &buf; } #endif pngquant_error retval = SUCCESS; const char *outname = output_file_path; char *outname_free = NULL; if (!options.using_stdout) { if (!outname) { outname = outname_free = add_filename_extension(filename, newext); } if (!options.force && file_exists(outname)) { fprintf(stderr, " error: '%s' exists; not overwriting\n", outname); retval = NOT_OVERWRITING_ERROR; } } if (SUCCESS == retval) { retval = pngquant_file(filename, outname, &opts); } free(outname_free); liq_attr_destroy(opts.liq); if (retval) { #pragma omp critical { latest_error = retval; } if (retval == TOO_LOW_QUALITY || retval == TOO_LARGE_FILE) { skipped_count++; } else { error_count++; } } ++file_count; } if (error_count) { verbose_printf(&options, "There were errors quantizing %d file%s out of a total of %d file%s.", error_count, (error_count == 1)? "" : "s", file_count, (file_count == 1)? "" : "s"); } if (skipped_count) { verbose_printf(&options, "Skipped %d file%s out of a total of %d file%s.", skipped_count, (skipped_count == 1)? "" : "s", file_count, (file_count == 1)? "" : "s"); } if (!skipped_count && !error_count) { verbose_printf(&options, "No errors detected while quantizing %d image%s.", file_count, (file_count == 1)? 
"" : "s"); } liq_image_destroy(options.fixed_palette_image); liq_attr_destroy(options.liq); return latest_error; } pngquant_error pngquant_file(const char *filename, const char *outname, struct pngquant_options *options) { pngquant_error retval = SUCCESS; verbose_printf(options, "%s:", filename); liq_image *input_image = NULL; png24_image input_image_rwpng = {}; bool keep_input_pixels = options->skip_if_larger || (options->using_stdout && options->min_quality_limit); // original may need to be output to stdout if (SUCCESS == retval) { retval = read_image(options->liq, filename, options->using_stdin, &input_image_rwpng, &input_image, keep_input_pixels, options->verbose); } int quality_percent = 90; // quality on 0-100 scale, updated upon successful remap png8_image output_image = {}; if (SUCCESS == retval) { verbose_printf(options, " read %luKB file", (input_image_rwpng.file_size+1023UL)/1024UL); #if USE_LCMS if (input_image_rwpng.lcms_status == ICCP) { verbose_printf(options, " used embedded ICC profile to transform image to sRGB colorspace"); } else if (input_image_rwpng.lcms_status == GAMA_CHRM) { verbose_printf(options, " used gAMA and cHRM chunks to transform image to sRGB colorspace"); } else if (input_image_rwpng.lcms_status == ICCP_WARN_GRAY) { verbose_printf(options, " warning: ignored ICC profile in GRAY colorspace"); } #endif if (input_image_rwpng.gamma != 0.45455) { verbose_printf(options, " corrected image from gamma %2.1f to sRGB gamma", 1.0/input_image_rwpng.gamma); } // when using image as source of a fixed palette the palette is extracted using regular quantization liq_result *remap = liq_quantize_image(options->liq, options->fixed_palette_image ? options->fixed_palette_image : input_image); if (remap) { liq_set_output_gamma(remap, 0.45455); // fixed gamma ~2.2 for the web. PNG can't store exact 1/2.2 liq_set_dithering_level(remap, options->floyd); retval = prepare_output_image(remap, input_image, &output_image); if (SUCCESS == retval) { if (LIQ_OK != liq_write_remapped_image_rows(remap, input_image, output_image.row_pointers)) { retval = OUT_OF_MEMORY_ERROR; } set_palette(remap, &output_image); double palette_error = liq_get_quantization_error(remap); if (palette_error >= 0) { quality_percent = liq_get_quantization_quality(remap); verbose_printf(options, " mapped image to new colors...MSE=%.3f (Q=%d)", palette_error, quality_percent); } } liq_result_destroy(remap); } else { retval = TOO_LOW_QUALITY; } } if (SUCCESS == retval) { if (options->skip_if_larger) { // this is very rough approximation, but generally avoid losing more quality than is gained in file size. // Quality is squared, because even greater savings are needed to justify big quality loss. 
double quality = quality_percent/100.0; output_image.maximum_file_size = (input_image_rwpng.file_size-1) * quality*quality; } output_image.fast_compression = options->fast_compression; output_image.chunks = input_image_rwpng.chunks; input_image_rwpng.chunks = NULL; retval = write_image(&output_image, NULL, outname, options); if (TOO_LARGE_FILE == retval) { verbose_printf(options, " file exceeded expected size of %luKB", (unsigned long)output_image.maximum_file_size/1024UL); } } if (options->using_stdout && keep_input_pixels && (TOO_LARGE_FILE == retval || TOO_LOW_QUALITY == retval)) { // when outputting to stdout it'd be nasty to create 0-byte file // so if quality is too low, output 24-bit original pngquant_error write_retval = write_image(NULL, &input_image_rwpng, outname, options); if (write_retval) { retval = write_retval; } } liq_image_destroy(input_image); rwpng_free_image24(&input_image_rwpng); rwpng_free_image8(&output_image); return retval; } static void set_palette(liq_result *result, png8_image *output_image) { const liq_palette *palette = liq_get_palette(result); // tRNS, etc. output_image->num_palette = palette->count; output_image->num_trans = 0; for(unsigned int i=0; i < palette->count; i++) { liq_color px = palette->entries[i]; if (px.a < 255) { output_image->num_trans = i+1; } output_image->palette[i] = (png_color){.red=px.r, .green=px.g, .blue=px.b}; output_image->trans[i] = px.a; } } static bool file_exists(const char *outname) { FILE *outfile = fopen(outname, "rb"); if ((outfile ) != NULL) { fclose(outfile); return true; } return false; } /* build the output filename from the input name by inserting "-fs8" or * "-or8" before the ".png" extension (or by appending that plus ".png" if * there isn't any extension), then make sure it doesn't exist already */ static char *add_filename_extension(const char *filename, const char *newext) { size_t x = strlen(filename); char* outname = malloc(x+4+strlen(newext)+1); if (!outname) return NULL; strncpy(outname, filename, x); if (strncmp(outname+x-4, ".png", 4) == 0 || strncmp(outname+x-4, ".PNG", 4) == 0) { strcpy(outname+x-4, newext); } else { strcpy(outname+x, newext); } return outname; } static char *temp_filename(const char *basename) { size_t x = strlen(basename); char *outname = malloc(x+1+4); if (!outname) return NULL; strcpy(outname, basename); strcpy(outname+x, ".tmp"); return outname; } static void set_binary_mode(FILE *fp) { #if defined(WIN32) || defined(__WIN32__) setmode(fp == stdout ? 
1 : 0, O_BINARY); #endif } static const char *filename_part(const char *path) { const char *outfilename = strrchr(path, '/'); if (outfilename) { return outfilename+1; } else { return path; } } static bool replace_file(const char *from, const char *to, const bool force) { #if defined(WIN32) || defined(__WIN32__) if (force) { // On Windows rename doesn't replace unlink(to); } #endif return (0 == rename(from, to)); } static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options) { FILE *outfile; char *tempname = NULL; if (options->using_stdout) { set_binary_mode(stdout); outfile = stdout; if (output_image) { verbose_printf(options, " writing %d-color image to stdout", output_image->num_palette); } else { verbose_printf(options, " writing truecolor image to stdout"); } } else { tempname = temp_filename(outname); if (!tempname) return OUT_OF_MEMORY_ERROR; if ((outfile = fopen(tempname, "wb")) == NULL) { fprintf(stderr, " error: cannot open '%s' for writing\n", tempname); free(tempname); return CANT_WRITE_ERROR; } if (output_image) { verbose_printf(options, " writing %d-color image as %s", output_image->num_palette, filename_part(outname)); } else { verbose_printf(options, " writing truecolor image as %s", filename_part(outname)); } } pngquant_error retval; #pragma omp critical (libpng) { if (output_image) { retval = rwpng_write_image8(outfile, output_image); } else { retval = rwpng_write_image24(outfile, output_image24); } } if (!options->using_stdout) { fclose(outfile); if (SUCCESS == retval) { // Image has been written to a temporary file and then moved over destination. // This makes replacement atomic and avoids damaging destination file on write error. if (!replace_file(tempname, outname, options->force)) { retval = CANT_WRITE_ERROR; } } if (retval) { unlink(tempname); } free(tempname); } if (retval && retval != TOO_LARGE_FILE) { fprintf(stderr, " error: failed writing image to %s\n", outname); } return retval; } static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool verbose) { FILE *infile; if (using_stdin) { set_binary_mode(stdin); infile = stdin; } else if ((infile = fopen(filename, "rb")) == NULL) { fprintf(stderr, " error: cannot open %s for reading\n", filename); return READ_ERROR; } pngquant_error retval; #pragma omp critical (libpng) { retval = rwpng_read_image24(infile, input_image_p, verbose); } if (!using_stdin) { fclose(infile); } if (retval) { fprintf(stderr, " error: cannot decode image %s\n", using_stdin ? 
"from stdin" : filename_part(filename)); return retval; } *liq_image_p = liq_image_create_rgba_rows(options, (void**)input_image_p->row_pointers, input_image_p->width, input_image_p->height, input_image_p->gamma); if (!*liq_image_p) { return OUT_OF_MEMORY_ERROR; } if (!keep_input_pixels) { if (LIQ_OK != liq_image_set_memory_ownership(*liq_image_p, LIQ_OWN_ROWS | LIQ_OWN_PIXELS)) { return OUT_OF_MEMORY_ERROR; } input_image_p->row_pointers = NULL; input_image_p->rgba_data = NULL; } return SUCCESS; } static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, png8_image *output_image) { output_image->width = liq_image_get_width(input_image); output_image->height = liq_image_get_height(input_image); output_image->gamma = liq_get_output_gamma(result); /* ** Step 3.7 [GRR]: allocate memory for the entire indexed image */ output_image->indexed_data = malloc(output_image->height * output_image->width); output_image->row_pointers = malloc(output_image->height * sizeof(output_image->row_pointers[0])); if (!output_image->indexed_data || !output_image->row_pointers) { return OUT_OF_MEMORY_ERROR; } for(unsigned int row = 0; row < output_image->height; ++row) { output_image->row_pointers[row] = output_image->indexed_data + row*output_image->width; } const liq_palette *palette = liq_get_palette(result); // tRNS, etc. output_image->num_palette = palette->count; output_image->num_trans = 0; for(unsigned int i=0; i < palette->count; i++) { if (palette->entries[i].a < 255) { output_image->num_trans = i+1; } } return SUCCESS; }
DRB059-lastprivate-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Using lastprivate() to resolve an output dependence. Semantics of lastprivate (x): causes the corresponding original list item to be updated after the end of the region. The compiler/runtime copies the local value back to the shared one within the last iteration. */ #include <stdio.h> void foo() { int i,x; #pragma omp parallel for private(i ) lastprivate(x ) for (i=0;i<100;i++) x=i; printf("x=%d",x); } int main() { foo(); return 0; }
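/*
 * Sketch (not part of the DataRaceBench file above; foo_manual is an
 * illustrative name): roughly what lastprivate(x) expands to if written by
 * hand - every thread updates a private copy, and only the thread that
 * executes the sequentially last iteration (i == 99) publishes its value to
 * the shared x after the loop.
 */
#include <stdio.h>

void foo_manual(void)
{
    int x = 0;
    #pragma omp parallel
    {
        int x_priv = 0;
        int last_i = -1;
        #pragma omp for nowait
        for (int i = 0; i < 100; i++) {
            x_priv = i;
            last_i = i;
        }
        if (last_i == 99)
            x = x_priv;   /* only one thread takes this branch: no race */
    }
    printf("x=%d\n", x);
}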
GB_binop__lt_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lt_uint32) // A.*B function (eWiseMult): GB (_AemultB_08__lt_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__lt_uint32) // A.*B function (eWiseMult): GB (_AemultB_04__lt_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_uint32) // A*D function (colscale): GB (_AxD__lt_uint32) // D*A function (rowscale): GB (_DxB__lt_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__lt_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__lt_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_uint32) // C=scalar+B GB (_bind1st__lt_uint32) // C=scalar+B' GB (_bind1st_tran__lt_uint32) // C=A+scalar GB (_bind2nd__lt_uint32) // C=A'+scalar GB (_bind2nd_tran__lt_uint32) // C type: bool // A type: uint32_t // A pattern? 0 // B type: uint32_t // B pattern? 0 // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LT || GxB_NO_UINT32 || GxB_NO_LT_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__lt_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lt_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lt_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lt_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lt_uint32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lt_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint32_t alpha_scalar ; uint32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint32_t *) alpha_scalar_in)) 
; beta_scalar = (*((uint32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__lt_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lt_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__lt_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lt_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lt_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lt_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__lt_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { 
// GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__lt_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
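The header comments of the generated file above define the operator as cij = (aij < bij) and include bind1st/bind2nd kernels that combine every stored entry of a matrix with one scalar. Below is a minimal standalone sketch of the bind2nd idea for this operator, written in plain C with OpenMP rather than the GraphBLAS API; the function and variable names are illustrative only. Each output entry is Ax[p] < y, and entries marked absent in an optional bitmap are skipped, mirroring the bitmap test in GB (_bind2nd__lt_uint32).

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: apply z = (x < y) with the scalar bound as the 2nd
   argument, over a dense array with an optional bitmap of present entries
   (NULL means every entry is present). */
static void bind2nd_lt_uint32(bool *Cx, const uint32_t *Ax,
                              const int8_t *Ab, int64_t anz, uint32_t y)
{
    int64_t p;
    #pragma omp parallel for schedule(static)
    for (p = 0; p < anz; p++)
    {
        if (Ab != NULL && !Ab[p]) continue;   /* entry not present */
        Cx[p] = (Ax[p] < y);
    }
}

int main(void)
{
    uint32_t Ax[4] = {1, 5, 9, 3};
    bool Cx[4];
    bind2nd_lt_uint32(Cx, Ax, NULL, 4, 5);
    for (int p = 0; p < 4; p++) printf("%d ", (int) Cx[p]);  /* prints: 1 0 0 1 */
    printf("\n");
    return 0;
}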
DRB038-truedepseconddimension-var-yes.c
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.

This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench.
Please also see the LICENSE file for our additional BSD notice.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice,
  this list of conditions and the disclaimer below.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the disclaimer (as noted below) in the documentation
  and/or other materials provided with the distribution.

* Neither the name of the LLNS/LLNL nor the names of its contributors may be used
  to endorse or promote products derived from this software without specific prior
  written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT
OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/*
Only the outermost loop can be parallelized in this program.
Data race pair: b[i][j]@65:7 vs. b[i][j-1]@65:15
*/
#include "omprace.h"
#include <omp.h>
#include <stdlib.h>

int main(int argc, char* argv[])
{
  omprace_init();
  int i, j;
  int len = 1000;

  if (argc > 1)
    len = atoi(argv[1]);
  int n = len, m = len;
  double b[n][m];

  for (i = 0; i < n; i++)
#pragma omp parallel for
    for (j = 1; j < m; j++)
      b[i][j] = b[i][j-1];

  omprace_fini();
  return 0;
}
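As the comment above notes, only the outermost loop can be parallelized safely: iteration j of the inner loop reads b[i][j-1], which iteration j-1 writes, so parallelizing the j loop (as the benchmark file does) races. A race-free placement of the pragma would look roughly like the sketch below; this is illustrative only, not a file from the benchmark suite, and it omits omprace.h while adding heap allocation and initialization so it runs standalone.

#include <stdlib.h>

int main(int argc, char* argv[])
{
  int i, j;
  int len = 1000;
  if (argc > 1) len = atoi(argv[1]);
  int n = len, m = len;
  double (*b)[m] = malloc(sizeof(double[n][m]));
  if (b == NULL) return 1;
  for (i = 0; i < n; i++) b[i][0] = i;

  /* The outer loop over i is parallelized; the inner loop, which carries the
     j -> j-1 dependence, runs sequentially within each row, so rows never
     touch each other's data and there is no race. */
#pragma omp parallel for private(j)
  for (i = 0; i < n; i++)
    for (j = 1; j < m; j++)
      b[i][j] = b[i][j-1];

  free(b);
  return 0;
}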
psd.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS DDDD % % P P SS D D % % PPPP SSS D D % % P SS D D % % P SSSSS DDDD % % % % % % Read/Write Adobe Photoshop Image Format % % % % Software Design % % Cristy % % Leonard Rosenthol % % July 1992 % % Dirk Lemstra % % December 2013 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/channel.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/registry.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #ifdef MAGICKCORE_ZLIB_DELEGATE #include <zlib.h> #endif #include "psd-private.h" /* Define declaractions. */ #define MaxPSDChannels 56 #define PSDQuantum(x) (((ssize_t) (x)+1) & -2) /* Enumerated declaractions. */ typedef enum { Raw = 0, RLE = 1, ZipWithoutPrediction = 2, ZipWithPrediction = 3 } PSDCompressionType; typedef enum { BitmapMode = 0, GrayscaleMode = 1, IndexedMode = 2, RGBMode = 3, CMYKMode = 4, MultichannelMode = 7, DuotoneMode = 8, LabMode = 9 } PSDImageType; /* Typedef declaractions. */ typedef struct _ChannelInfo { short type; size_t size; } ChannelInfo; typedef struct _MaskInfo { Image *image; RectangleInfo page; unsigned char background, flags; } MaskInfo; typedef struct _LayerInfo { ChannelInfo channel_info[MaxPSDChannels]; char blendkey[4]; Image *image; MaskInfo mask; Quantum opacity; RectangleInfo page; size_t offset_x, offset_y; unsigned char clipping, flags, name[257], visible; unsigned short channels; StringInfo *info; } LayerInfo; /* Forward declarations. 
*/ static MagickBooleanType WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P S D % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPSD()() returns MagickTrue if the image format type, identified by the % magick string, is PSD. % % The format of the IsPSD method is: % % MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((const char *) magick,"8BPS",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPSDImage() reads an Adobe Photoshop image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadPSDImage method is: % % Image *ReadPSDImage(image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static const char *CompositeOperatorToPSDBlendMode(Image *image) { switch (image->compose) { case ColorBurnCompositeOp: return(image->endian == LSBEndian ? "vidi" : "idiv"); case ColorDodgeCompositeOp: return(image->endian == LSBEndian ? " vid" : "div "); case ColorizeCompositeOp: return(image->endian == LSBEndian ? "rloc" : "colr"); case DarkenCompositeOp: return(image->endian == LSBEndian ? "krad" : "dark"); case DifferenceCompositeOp: return(image->endian == LSBEndian ? "ffid" : "diff"); case DissolveCompositeOp: return(image->endian == LSBEndian ? "ssid" : "diss"); case ExclusionCompositeOp: return(image->endian == LSBEndian ? "dums" : "smud"); case HardLightCompositeOp: return(image->endian == LSBEndian ? "tiLh" : "hLit"); case HardMixCompositeOp: return(image->endian == LSBEndian ? "xiMh" : "hMix"); case HueCompositeOp: return(image->endian == LSBEndian ? " euh" : "hue "); case LightenCompositeOp: return(image->endian == LSBEndian ? "etil" : "lite"); case LinearBurnCompositeOp: return(image->endian == LSBEndian ? "nrbl" : "lbrn"); case LinearDodgeCompositeOp: return(image->endian == LSBEndian ? "gddl" : "lddg"); case LinearLightCompositeOp: return(image->endian == LSBEndian ? "tiLl" : "lLit"); case LuminizeCompositeOp: return(image->endian == LSBEndian ? " mul" : "lum "); case MultiplyCompositeOp: return(image->endian == LSBEndian ? " lum" : "mul "); case OverlayCompositeOp: return(image->endian == LSBEndian ? "revo" : "over"); case PinLightCompositeOp: return(image->endian == LSBEndian ? "tiLp" : "pLit"); case SaturateCompositeOp: return(image->endian == LSBEndian ? " tas" : "sat "); case ScreenCompositeOp: return(image->endian == LSBEndian ? "nrcs" : "scrn"); case SoftLightCompositeOp: return(image->endian == LSBEndian ? "tiLs" : "sLit"); case VividLightCompositeOp: return(image->endian == LSBEndian ? "tiLv" : "vLit"); case OverCompositeOp: default: return(image->endian == LSBEndian ? 
"mron" : "norm"); } } /* For some reason Photoshop seems to blend semi-transparent pixels with white. This method reverts the blending. This can be disabled by setting the option 'psd:alpha-unblend' to off. */ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info, Image *image,ExceptionInfo* exception) { const char *option; MagickBooleanType status; ssize_t y; if ((image->alpha_trait != BlendPixelTrait) || (image->colorspace != sRGBColorspace)) return(MagickTrue); option=GetImageOption(image_info,"psd:alpha-unblend"); if (IsStringFalse(option) != MagickFalse) return(MagickTrue); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; register ssize_t i; gamma=QuantumScale*GetPixelAlpha(image, q); if (gamma != 0.0 && gamma != 1.0) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); if (channel != AlphaPixelChannel) q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma); } } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity, MagickBooleanType revert,ExceptionInfo *exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying layer opacity %.20g", (double) opacity); if (opacity == OpaqueAlpha) return(MagickTrue); if (image->alpha_trait != BlendPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))* opacity),q); else if (opacity > 0) SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/ (MagickRealType) opacity)),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask, Quantum background,MagickBooleanType revert,ExceptionInfo *exception) { Image *complete_mask; MagickBooleanType status; PixelInfo color; ssize_t y; if (image->alpha_trait == UndefinedPixelTrait) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying opacity mask"); complete_mask=CloneImage(image,0,0,MagickTrue,exception); if 
(complete_mask == (Image *) NULL) return(MagickFalse); complete_mask->alpha_trait=BlendPixelTrait; GetPixelInfo(complete_mask,&color); color.red=(MagickRealType) background; (void) SetImageColor(complete_mask,&color,exception); status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue, mask->page.x-image->page.x,mask->page.y-image->page.y,exception); if (status == MagickFalse) { complete_mask=DestroyImage(complete_mask); return(status); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register Quantum *p; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception); if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha=(MagickRealType) GetPixelAlpha(image,q); intensity=GetPixelIntensity(complete_mask,p); if (revert == MagickFalse) SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q); else if (intensity > 0) SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q); q+=GetPixelChannels(image); p+=GetPixelChannels(complete_mask); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } complete_mask=DestroyImage(complete_mask); return(status); } static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info, ExceptionInfo *exception) { char *key; RandomInfo *random_info; StringInfo *key_info; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " preserving opacity mask"); random_info=AcquireRandomInfo(); key_info=GetRandomKey(random_info,2+1); key=(char *) GetStringInfoDatum(key_info); key[8]=(char) layer_info->mask.background; key[9]='\0'; layer_info->mask.image->page.x+=layer_info->page.x; layer_info->mask.image->page.y+=layer_info->page.y; (void) SetImageRegistry(ImageRegistryType,(const char *) key, layer_info->mask.image,exception); (void) SetImageArtifact(layer_info->image,"psd:opacity-mask", (const char *) key); key_info=DestroyStringInfo(key_info); random_info=DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; register ssize_t i, j; size_t length; ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { packets--; length=(size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length=256-length+1; CheckNumberCompactPixels; pixel=(*compact_pixels++); for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 
0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++=(unsigned char) pixel; break; } } } continue; } length++; for (j=0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(*compact_pixels >> 6) & 0x03; *pixels++=(*compact_pixels >> 4) & 0x03; *pixels++=(*compact_pixels >> 2) & 0x03; *pixels++=(*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++=(*compact_pixels >> 4) & 0xff; *pixels++=(*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++=(*compact_pixels); break; } } compact_pixels++; } } return(i); } static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info, const ssize_t number_layers) { ssize_t i; for (i=0; i<number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image=DestroyImage(layer_info[i].image); if (layer_info[i].mask.image != (Image *) NULL) layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); } return (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline size_t GetPSDPacketSize(const Image *image) { if (image->storage_class == PseudoClass) { if (image->colors > 256) return(2); } if (image->depth > 16) return(4); if (image->depth > 8) return(2); return(1); } static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image) { if (psd_info->version == 1) return((MagickSizeType) ReadBlobLong(image)); return((MagickSizeType) ReadBlobLongLong(image)); } static inline size_t GetPSDRowSize(Image *image) { if (image->depth == 1) return(((image->columns+7)/8)*GetPSDPacketSize(image)); else return(image->columns*GetPSDPacketSize(image)); } static const char *ModeToString(PSDImageType type) { switch (type) { case BitmapMode: return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; default: return "unknown"; } } static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception) { ChannelType channel_mask; MagickBooleanType status; channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~ AlphaChannel)); status=NegateImage(image,MagickFalse,exception); (void) SetImageChannelMask(image,channel_mask); return(status); } static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image, const unsigned char *blocks,size_t length,ExceptionInfo *exception) { const unsigned char *p; ssize_t offset; StringInfo *profile; unsigned char 
name_length; unsigned int count; unsigned short id, short_sans; if (length < 16) return((StringInfo *) NULL); profile=BlobToStringInfo((const unsigned char *) NULL,length); SetStringInfoDatum(profile,blocks); SetStringInfoName(profile,"8bim"); for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); ) { if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p+=4; p=PushShortPixel(MSBEndian,p,&id); p=PushCharPixel(p,&name_length); if ((name_length % 2) == 0) name_length++; p+=name_length; if (p > (blocks+length-4)) break; p=PushLongPixel(MSBEndian,p,&count); offset=(ssize_t) count; if (((p+offset) < blocks) || ((p+offset) > (blocks+length))) break; switch (id) { case 0x03ed: { unsigned short resolution; /* Resolution info. */ if (offset < 16) break; p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.x=(double) resolution; (void) FormatImageProperty(image,"tiff:XResolution","%*g", GetMagickPrecision(),image->resolution.x); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.y=(double) resolution; (void) FormatImageProperty(image,"tiff:YResolution","%*g", GetMagickPrecision(),image->resolution.y); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); image->units=PixelsPerInchResolution; break; } case 0x0421: { if ((offset > 4) && (*(p+4) == 0)) psd_info->has_merged_image=MagickFalse; p+=offset; break; } default: { p+=offset; break; } } if ((offset & 0x01) != 0) p++; } return(profile); } static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode) { if (mode == (const char *) NULL) return(OverCompositeOp); if (LocaleNCompare(mode,"norm",4) == 0) return(OverCompositeOp); if (LocaleNCompare(mode,"mul ",4) == 0) return(MultiplyCompositeOp); if (LocaleNCompare(mode,"diss",4) == 0) return(DissolveCompositeOp); if (LocaleNCompare(mode,"diff",4) == 0) return(DifferenceCompositeOp); if (LocaleNCompare(mode,"dark",4) == 0) return(DarkenCompositeOp); if (LocaleNCompare(mode,"lite",4) == 0) return(LightenCompositeOp); if (LocaleNCompare(mode,"hue ",4) == 0) return(HueCompositeOp); if (LocaleNCompare(mode,"sat ",4) == 0) return(SaturateCompositeOp); if (LocaleNCompare(mode,"colr",4) == 0) return(ColorizeCompositeOp); if (LocaleNCompare(mode,"lum ",4) == 0) return(LuminizeCompositeOp); if (LocaleNCompare(mode,"scrn",4) == 0) return(ScreenCompositeOp); if (LocaleNCompare(mode,"over",4) == 0) return(OverlayCompositeOp); if (LocaleNCompare(mode,"hLit",4) == 0) return(HardLightCompositeOp); if (LocaleNCompare(mode,"sLit",4) == 0) return(SoftLightCompositeOp); if (LocaleNCompare(mode,"smud",4) == 0) return(ExclusionCompositeOp); if (LocaleNCompare(mode,"div ",4) == 0) return(ColorDodgeCompositeOp); if (LocaleNCompare(mode,"idiv",4) == 0) return(ColorBurnCompositeOp); if (LocaleNCompare(mode,"lbrn",4) == 0) return(LinearBurnCompositeOp); if (LocaleNCompare(mode,"lddg",4) == 0) return(LinearDodgeCompositeOp); if (LocaleNCompare(mode,"lLit",4) == 0) return(LinearLightCompositeOp); if (LocaleNCompare(mode,"vLit",4) == 0) return(VividLightCompositeOp); if (LocaleNCompare(mode,"pLit",4) == 0) return(PinLightCompositeOp); if (LocaleNCompare(mode,"hMix",4) == 0) return(HardMixCompositeOp); return(OverCompositeOp); } static inline void ReversePSDString(Image *image,char *p,size_t length) { char *q; if (image->endian == MSBEndian) return; q=p+length; for(--q; p < q; ++p, --q) { *p = 
*p ^ *q, *q = *p ^ *q, *p = *p ^ *q; } } static inline void SetPSDPixel(Image *image,const size_t channels, const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q, ExceptionInfo *exception) { if (image->storage_class == PseudoClass) { PixelInfo *color; Quantum index; index=pixel; if (packet_size == 1) index=(Quantum) ScaleQuantumToChar(index); index=(Quantum) ConstrainColormapIndex(image,(ssize_t) index, exception); if (type == 0) SetPixelIndex(image,index,q); if ((type == 0) && (channels > 1)) return; color=image->colormap+(ssize_t) GetPixelIndex(image,q); if (type != 0) color->alpha=(MagickRealType) pixel; SetPixelViaPixelInfo(image,color,q); return; } switch (type) { case -1: { SetPixelAlpha(image,pixel,q); break; } case -2: case 0: { SetPixelRed(image,pixel,q); break; } case -3: case 1: { SetPixelGreen(image,pixel,q); break; } case -4: case 2: { SetPixelBlue(image,pixel,q); break; } case 3: { if (image->colorspace == CMYKColorspace) SetPixelBlack(image,pixel,q); else if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,pixel,q); break; } case 4: { if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) && (channels > 3)) break; if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,pixel,q); break; } } } static MagickBooleanType ReadPSDChannelPixels(Image *image, const size_t channels,const ssize_t row,const ssize_t type, const unsigned char *pixels,ExceptionInfo *exception) { Quantum pixel; register const unsigned char *p; register Quantum *q; register ssize_t x; size_t packet_size; p=pixels; q=GetAuthenticPixels(image,0,row,image->columns,1,exception); if (q == (Quantum *) NULL) return MagickFalse; packet_size=GetPSDPacketSize(image); for (x=0; x < (ssize_t) image->columns; x++) { if (packet_size == 1) pixel=ScaleCharToQuantum(*p++); else if (packet_size == 2) { unsigned short nibble; p=PushShortPixel(MSBEndian,p,&nibble); pixel=ScaleShortToQuantum(nibble); } else { MagickFloatType nibble; p=PushFloatPixel(MSBEndian,p,&nibble); pixel=ClampToQuantum((MagickRealType) (QuantumRange*nibble)); } if (image->depth > 1) { SetPSDPixel(image,channels,type,packet_size,pixel,q,exception); q+=GetPixelChannels(image); } else { ssize_t bit, number_bits; number_bits=(ssize_t) image->columns-x; if (number_bits > 8) number_bits=8; for (bit = 0; bit < (ssize_t) number_bits; bit++) { SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel) & (0x01 << (7-bit))) != 0 ? 
0 : QuantumRange,q,exception); q+=GetPixelChannels(image); x++; } if (x != (ssize_t) image->columns) x--; continue; } } return(SyncAuthenticPixels(image,exception)); } static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels, const ssize_t type,ExceptionInfo *exception) { MagickBooleanType status; size_t row_size; ssize_t count, y; unsigned char *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RAW"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) memset(pixels,0,row_size*sizeof(*pixels)); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,row_size,pixels); if (count != (ssize_t) row_size) { status=MagickFalse; break; } status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception); if (status == MagickFalse) break; } pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } static inline MagickOffsetType *ReadPSDRLESizes(Image *image, const PSDInfo *psd_info,const size_t size) { MagickOffsetType *sizes; ssize_t y; sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes)); if(sizes != (MagickOffsetType *) NULL) { for (y=0; y < (ssize_t) size; y++) { if (psd_info->version == 1) sizes[y]=(MagickOffsetType) ReadBlobShort(image); else sizes[y]=(MagickOffsetType) ReadBlobLong(image); } } return sizes; } static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info, const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception) { MagickBooleanType status; size_t length, row_size; ssize_t count, y; unsigned char *compact_pixels, *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RLE compressed"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); length=0; for (y=0; y < (ssize_t) image->rows; y++) if ((MagickOffsetType) length < sizes[y]) length=(size_t) sizes[y]; if (length > (row_size+2048)) /* arbitrary number */ { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename); } compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels)); if (compact_pixels == (unsigned char *) NULL) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(compact_pixels,0,length*sizeof(*compact_pixels)); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,(size_t) sizes[y],compact_pixels); if (count != (ssize_t) sizes[y]) break; count=DecodePSDPixels((size_t) sizes[y],compact_pixels, (ssize_t) (image->depth == 1 ? 
123456 : image->depth),row_size,pixels); if (count != (ssize_t) row_size) break; status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels, exception); if (status == MagickFalse) break; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #ifdef MAGICKCORE_ZLIB_DELEGATE static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels, const ssize_t type,const PSDCompressionType compression, const size_t compact_size,ExceptionInfo *exception) { MagickBooleanType status; register unsigned char *p; size_t count, length, packet_size, row_size; ssize_t y; unsigned char *compact_pixels, *pixels; z_stream stream; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is ZIP compressed"); if ((MagickSizeType) compact_size > GetBlobSize(image)) ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size, sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); packet_size=GetPSDPacketSize(image); row_size=image->columns*packet_size; count=image->rows*row_size; pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) { compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; stream.next_in=(Bytef *)compact_pixels; stream.avail_in=(uInt) compact_size; stream.next_out=(Bytef *)pixels; stream.avail_out=(uInt) count; if (inflateInit(&stream) == Z_OK) { int ret; while (stream.avail_out > 0) { ret=inflate(&stream,Z_SYNC_FLUSH); if ((ret != Z_OK) && (ret != Z_STREAM_END)) { (void) inflateEnd(&stream); compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(MagickFalse); } if (ret == Z_STREAM_END) break; } (void) inflateEnd(&stream); } if (compression == ZipWithPrediction) { p=pixels; while (count > 0) { length=image->columns; while (--length) { if (packet_size == 2) { p[2]+=p[0]+((p[1]+p[3]) >> 8); p[3]+=p[1]; } /* else if (packet_size == 4) { TODO: Figure out what to do there. 
} */ else *(p+1)+=*p; p+=packet_size; } p+=packet_size; count-=row_size; } } status=MagickTrue; p=pixels; for (y=0; y < (ssize_t) image->rows; y++) { status=ReadPSDChannelPixels(image,channels,y,type,p,exception); if (status == MagickFalse) break; p+=row_size; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #endif static MagickBooleanType ReadPSDChannel(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info, const size_t channel,const PSDCompressionType compression, ExceptionInfo *exception) { Image *channel_image, *mask; MagickOffsetType offset; MagickBooleanType status; channel_image=image; mask=(Image *) NULL; if ((layer_info->channel_info[channel].type < -1) && (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0)) { const char *option; /* Ignore mask that is not a user supplied layer mask, if the mask is disabled or if the flags have unsupported values. */ option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if ((layer_info->channel_info[channel].type != -2) || (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) && (IsStringTrue(option) == MagickFalse))) { (void) SeekBlob(image,(MagickOffsetType) layer_info->channel_info[channel].size-2,SEEK_CUR); return(MagickTrue); } mask=CloneImage(image,layer_info->mask.page.width, layer_info->mask.page.height,MagickFalse,exception); if (mask != (Image *) NULL) { (void) ResetImagePixels(mask,exception); (void) SetImageType(mask,GrayscaleType,exception); channel_image=mask; } } offset=TellBlob(image); status=MagickFalse; switch(compression) { case Raw: status=ReadPSDChannelRaw(channel_image,psd_info->channels, (ssize_t) layer_info->channel_info[channel].type,exception); break; case RLE: { MagickOffsetType *sizes; sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ReadPSDChannelRLE(channel_image,psd_info, (ssize_t) layer_info->channel_info[channel].type,sizes,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); } break; case ZipWithPrediction: case ZipWithoutPrediction: #ifdef MAGICKCORE_ZLIB_DELEGATE status=ReadPSDChannelZip(channel_image,layer_info->channels, (ssize_t) layer_info->channel_info[channel].type,compression, layer_info->channel_info[channel].size-2,exception); #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (ZLIB)",image->filename); #endif break; default: (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning, "CompressionNotSupported","'%.20g'",(double) compression); break; } (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2, SEEK_SET); if (status == MagickFalse) { if (mask != (Image *) NULL) (void) DestroyImage(mask); ThrowBinaryException(CoderError,"UnableToDecompressImage", image->filename); } if (mask != (Image *) NULL) { if (layer_info->mask.image != (Image *) NULL) layer_info->mask.image=DestroyImage(layer_info->mask.image); layer_info->mask.image=mask; } return(status); } static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info, const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception) { char message[MagickPathExtent]; MagickBooleanType status; PSDCompressionType compression; ssize_t j; if (image->debug != MagickFalse) (void) 
LogMagickEvent(CoderEvent,GetMagickModule(), " setting up new layer image"); if (psd_info->mode != IndexedMode) (void) SetImageBackgroundColor(layer_info->image,exception); layer_info->image->compose=PSDBlendModeToCompositeOperator( layer_info->blendkey); if (layer_info->visible == MagickFalse) layer_info->image->compose=NoCompositeOp; /* Set up some hidden attributes for folks that need them. */ (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.x); (void) SetImageArtifact(layer_info->image,"psd:layer.x",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.y); (void) SetImageArtifact(layer_info->image,"psd:layer.y",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double) layer_info->opacity); (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message); (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name, exception); status=MagickTrue; for (j=0; j < (ssize_t) layer_info->channels; j++) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for channel %.20g",(double) j); compression=(PSDCompressionType) ReadBlobShort(layer_info->image); /* TODO: Remove this when we figure out how to support this */ if ((compression == ZipWithPrediction) && (image->depth == 32)) { (void) ThrowMagickException(exception,GetMagickModule(), TypeError,"CompressionNotSupported","ZipWithPrediction(32 bit)"); return(MagickFalse); } layer_info->image->compression=ConvertPSDCompression(compression); if (layer_info->channel_info[j].type == -1) layer_info->image->alpha_trait=BlendPixelTrait; status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info, (size_t) j,compression,exception); if (status == MagickFalse) break; } if (status != MagickFalse) status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity, MagickFalse,exception); if ((status != MagickFalse) && (layer_info->image->colorspace == CMYKColorspace)) status=NegateCMYK(layer_info->image,exception); if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL)) { const char *option; layer_info->mask.image->page.x=layer_info->mask.page.x; layer_info->mask.image->page.y=layer_info->mask.page.y; /* Do not composite the mask when it is disabled */ if ((layer_info->mask.flags & 0x02) == 0x02) layer_info->mask.image->compose=NoCompositeOp; else status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image, layer_info->mask.background == 0 ? 
0 : QuantumRange,MagickFalse, exception); option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if (IsStringTrue(option) != MagickFalse) PreservePSDOpacityMask(image,layer_info,exception); layer_info->mask.image=DestroyImage(layer_info->mask.image); } return(status); } static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info, LayerInfo *layer_info) { int channel_type; register ssize_t i; if (layer_info->channels < psd_info->min_channels) return(MagickFalse); channel_type=RedChannel; if (psd_info->min_channels >= 3) channel_type|=(GreenChannel | BlueChannel); if (psd_info->min_channels >= 4) channel_type|=BlackChannel; for (i=0; i < (ssize_t) layer_info->channels; i++) { short type; type=layer_info->channel_info[i].type; if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0)) return(MagickFalse); if (type == -1) { channel_type|=AlphaChannel; continue; } if (type < -1) continue; if (type == 0) channel_type&=~RedChannel; else if (type == 1) channel_type&=~GreenChannel; else if (type == 2) channel_type&=~BlueChannel; else if (type == 3) channel_type&=~BlackChannel; } if (channel_type == 0) return(MagickTrue); if ((channel_type == AlphaChannel) && (layer_info->channels >= psd_info->min_channels + 1)) return(MagickTrue); return(MagickFalse); } static void AttachPSDLayers(Image *image,LayerInfo *layer_info, ssize_t number_layers) { register ssize_t i; ssize_t j; for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=i; j < number_layers - 1; j++) layer_info[j] = layer_info[j+1]; number_layers--; i--; } } if (number_layers == 0) { layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); return; } for (i=0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous=layer_info[i-1].image; if (i < (number_layers-1)) layer_info[i].image->next=layer_info[i+1].image; layer_info[i].image->page=layer_info[i].page; } image->next=layer_info[0].image; layer_info[0].image->previous=image; layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); } static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info, const ImageInfo *image_info,const size_t index) { if (psd_info->has_merged_image == MagickFalse) return(MagickFalse); if (image_info->number_scenes == 0) return(MagickFalse); if (index < image_info->scene) return(MagickTrue); if (index > image_info->scene+image_info->number_scenes-1) return(MagickTrue); return(MagickFalse); } static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image) { /* The number of layers cannot be used to determine if the merged image contains an alpha channel. So we enable it when we think we should. */ if (((psd_info->mode == GrayscaleMode) && (psd_info->channels > 2)) || ((psd_info->mode == RGBMode) && (psd_info->channels > 3)) || ((psd_info->mode == CMYKMode) && (psd_info->channels > 4))) image->alpha_trait=BlendPixelTrait; } static MagickBooleanType ReadPSDLayersInternal(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info, const MagickBooleanType skip_layers,ExceptionInfo *exception) { char type[4]; LayerInfo *layer_info; MagickSizeType size; MagickBooleanType status; register ssize_t i; ssize_t count, index, j, number_layers; size=GetPSDSize(psd_info,image); if (size == 0) { /* Skip layers & masks. 
*/ (void) ReadBlobLong(image); count=ReadBlob(image,4,(unsigned char *) type); if (count == 4) ReversePSDString(image,type,(size_t) count); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) { CheckMergedImageAlpha(psd_info,image); return(MagickTrue); } else { count=ReadBlob(image,4,(unsigned char *) type); if (count == 4) ReversePSDString(image,type,4); if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) || (LocaleNCompare(type,"Lr32",4) == 0))) size=GetPSDSize(psd_info,image); else { CheckMergedImageAlpha(psd_info,image); return(MagickTrue); } } } if (size == 0) return(MagickTrue); layer_info=(LayerInfo *) NULL; number_layers=(ssize_t) ReadBlobSignedShort(image); if (number_layers < 0) { /* The first alpha channel in the merged result contains the transparency data for the merged result. */ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->alpha_trait=BlendPixelTrait; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t top, left, bottom, right; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1); top=(ssize_t) ReadBlobSignedLong(image); left=(ssize_t) ReadBlobSignedLong(image); bottom=(ssize_t) ReadBlobSignedLong(image); right=(ssize_t) ReadBlobSignedLong(image); if ((right < left) || (bottom < top)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } layer_info[i].page.y=top; layer_info[i].page.x=left; layer_info[i].page.width=(size_t) (right-left); layer_info[i].page.height=(size_t) (bottom-top); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double) layer_info[i].page.x,(double) layer_info[i].page.y, (double) layer_info[i].page.height,(double) layer_info[i].page.width,(double) layer_info[i].channels); for (j=0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type=(short) ReadBlobShort(image); if ((layer_info[i].channel_info[j].type < -4) || (layer_info[i].channel_info[j].type > 4)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"NoSuchImageChannel", image->filename); } layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " 
channel[%.20g]: type=%.20g, size=%.20g",(double) j, (double) layer_info[i].channel_info[j].type, (double) layer_info[i].channel_info[j].size); } if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadBlob(image,4,(unsigned char *) type); if (count == 4) ReversePSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey); if (count != 4) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } ReversePSDString(image,layer_info[i].blendkey,4); layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping=(unsigned char) ReadBlobByte(image); layer_info[i].flags=(unsigned char) ReadBlobByte(image); layer_info[i].visible=!(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey,(double) layer_info[i].opacity, layer_info[i].clipping ? "true" : "false",layer_info[i].flags, layer_info[i].visible ? "true" : "false"); (void) ReadBlobByte(image); /* filler */ size=ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer contains additional info"); length=ReadBlobLong(image); combined_length=length+4; if (length != 0) { /* Layer mask info. */ layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.height=(size_t) (ReadBlobSignedLong(image)-layer_info[i].mask.page.y); layer_info[i].mask.page.width=(size_t) ( ReadBlobSignedLong(image)-layer_info[i].mask.page.x); layer_info[i].mask.background=(unsigned char) ReadBlobByte( image); layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y=layer_info[i].mask.page.y- layer_info[i].page.y; layer_info[i].mask.page.x=layer_info[i].mask.page.x- layer_info[i].page.x; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double) layer_info[i].mask.page.x,(double) layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,(double) layer_info[i].mask.page.height,(double) ((MagickOffsetType) length)-18); /* Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=ReadBlobLong(image); combined_length+=length+4; if (length != 0) { /* Layer blending ranges info. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer blending ranges: length=%.20g",(double) ((MagickOffsetType) length)); if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } /* Layer name. */ length=(MagickSizeType) (unsigned char) ReadBlobByte(image); combined_length+=length+1; if (length > 0) (void) ReadBlob(image,(size_t) length++,layer_info[i].name); layer_info[i].name[length]='\0'; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer name: %s",layer_info[i].name); if ((length % 4) != 0) { length=4-(length % 4); combined_length+=length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=(MagickSizeType) size-combined_length; if (length > 0) { unsigned char *info; if (length > GetBlobSize(image)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "InsufficientImageDataInFile",image->filename); } layer_info[i].info=AcquireStringInfo((const size_t) length); info=GetStringInfoDatum(layer_info[i].info); (void) ReadBlob(image,(const size_t) length,info); } } } for (i=0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); continue; } /* Allocate layered image. 
*/ layer_info[i].image=CloneImage(image,layer_info[i].page.width, layer_info[i].page.height,MagickFalse,exception); if (layer_info[i].image == (Image *) NULL) { layer_info=DestroyLayerInfo(layer_info,number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of image for layer %.20g failed",(double) i); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void) SetImageProfile(layer_info[i].image,"psd:additional-info", layer_info[i].info,exception); layer_info[i].info=DestroyStringInfo(layer_info[i].info); } } if (image_info->ping != MagickFalse) { AttachPSDLayers(image,layer_info,number_layers); return(MagickTrue); } status=MagickTrue; index=0; for (i=0; i < number_layers; i++) { if ((layer_info[i].image == (Image *) NULL) || (PSDSkipImage(psd_info, image_info,++index) != MagickFalse)) { for (j=0; j < (ssize_t) layer_info[i].channels; j++) { if (DiscardBlobBytes(image,(MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } continue; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for layer %.20g",(double) i); status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i], exception); if (status == MagickFalse) break; status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i, (MagickSizeType) number_layers); if (status == MagickFalse) break; } if (status != MagickFalse) AttachPSDLayers(image,layer_info,number_layers); else layer_info=DestroyLayerInfo(layer_info,number_layers); return(status); } ModuleExport MagickBooleanType ReadPSDLayers(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception) { PolicyDomain domain; PolicyRights rights; domain=CoderPolicyDomain; rights=ReadPolicyRights; if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse) return(MagickTrue); return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse, exception)); } static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info, Image *image,const PSDInfo *psd_info,ExceptionInfo *exception) { MagickOffsetType *sizes; MagickBooleanType status; PSDCompressionType compression; register ssize_t i; if ((image_info->number_scenes != 0) && (image_info->scene != 0)) return(MagickTrue); compression=(PSDCompressionType) ReadBlobMSBShort(image); image->compression=ConvertPSDCompression(compression); if (compression != Raw && compression != RLE) { (void) ThrowMagickException(exception,GetMagickModule(), TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression); return(MagickFalse); } sizes=(MagickOffsetType *) NULL; if (compression == RLE) { sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } status=MagickTrue; for (i=0; i < (ssize_t) psd_info->channels; i++) { ssize_t type; type=i; if ((type == 1) && (psd_info->channels == 2)) type=-1; if (compression == RLE) status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows), exception); else status=ReadPSDChannelRaw(image,psd_info->channels,type,exception); if (status != MagickFalse) status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i, psd_info->channels); if (status == MagickFalse) break; } if ((status != 
MagickFalse) && (image->colorspace == CMYKColorspace)) status=NegateCMYK(image,exception); if (status != MagickFalse) status=CorrectPSDAlphaBlend(image_info,image,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); return(status); } static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType skip_layers; MagickOffsetType offset; MagickSizeType length; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t imageListLength; ssize_t count; StringInfo *profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read image header. */ image->endian=MSBEndian; count=ReadBlob(image,4,(unsigned char *) psd_info.signature); psd_info.version=ReadBlobMSBShort(image); if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) || ((psd_info.version != 1) && (psd_info.version != 2))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); (void) ReadBlob(image,6,psd_info.reserved); psd_info.channels=ReadBlobMSBShort(image); if (psd_info.channels < 1) ThrowReaderException(CorruptImageError,"MissingImageChannel"); if (psd_info.channels > MaxPSDChannels) ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded"); psd_info.rows=ReadBlobMSBLong(image); psd_info.columns=ReadBlobMSBLong(image); if ((psd_info.version == 1) && ((psd_info.rows > 30000) || (psd_info.columns > 30000))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.depth=ReadBlobMSBShort(image); if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16) && (psd_info.depth != 32)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.mode=ReadBlobMSBShort(image); if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s", (double) psd_info.columns,(double) psd_info.rows,(double) psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType) psd_info.mode)); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize image. */ image->depth=psd_info.depth; image->columns=psd_info.columns; image->rows=psd_info.rows; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); status=ResetImagePixels(image,exception); if (status == MagickFalse) return(DestroyImageList(image)); psd_info.min_channels=3; if (psd_info.mode == LabMode) (void) SetImageColorspace(image,LabColorspace,exception); if (psd_info.mode == CMYKMode) { psd_info.min_channels=4; (void) SetImageColorspace(image,CMYKColorspace,exception); } else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) || (psd_info.mode == DuotoneMode)) { if (psd_info.depth != 32) { status=AcquireImageColormap(image,MagickMin((size_t) (psd_info.depth < 16 ? 
256 : 65536), MaxColormapSize),exception); if (status == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image colormap allocated"); } psd_info.min_channels=1; (void) SetImageColorspace(image,GRAYColorspace,exception); } else if (psd_info.mode == IndexedMode) psd_info.min_channels=1; if (psd_info.channels < psd_info.min_channels) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Read PSD raster colormap only present for indexed and duotone images. */ length=ReadBlobMSBLong(image); if ((psd_info.mode == IndexedMode) && (length < 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (length != 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading colormap"); if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32)) { /* Duotone image data; the format of this data is undocumented. 32 bits per pixel; the colormap is ignored. */ (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR); } else { size_t number_colors; /* Read PSD raster colormap. */ number_colors=(size_t) length/3; if (number_colors > 65536) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireImageColormap(image,number_colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].red=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].green=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); image->alpha_trait=UndefinedPixelTrait; } } if ((image->depth == 1) && (image->storage_class != PseudoClass)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); psd_info.has_merged_image=MagickTrue; profile=(StringInfo *) NULL; length=ReadBlobMSBLong(image); if (length != 0) { unsigned char *blocks; /* Image resources block. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading image resource blocks - %.20g bytes",(double) ((MagickOffsetType) length)); if (length > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); blocks=(unsigned char *) AcquireQuantumMemory((size_t) length, sizeof(*blocks)); if (blocks == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,(size_t) length,blocks); if ((count != (ssize_t) length) || (length < 4) || (LocaleNCompare((char *) blocks,"8BIM",4) != 0)) { blocks=(unsigned char *) RelinquishMagickMemory(blocks); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length, exception); blocks=(unsigned char *) RelinquishMagickMemory(blocks); } /* Layer and mask block. 
*/ length=GetPSDSize(&psd_info,image); if (length == 8) { length=ReadBlobMSBLong(image); length=ReadBlobMSBLong(image); } offset=TellBlob(image); skip_layers=MagickFalse; if ((image_info->number_scenes == 1) && (image_info->scene == 0) && (psd_info.has_merged_image != MagickFalse)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " read composite only"); skip_layers=MagickTrue; } if (length == 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has no layers"); } else { if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers, exception) != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } /* Skip the rest of the layer and mask information. */ (void) SeekBlob(image,offset+length,SEEK_SET); } /* If we are only "pinging" the image, then we're done - so return. */ if (EOFBlob(image) != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); } if (image_info->ping != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* Read the precombined layer, present for PSD < 4 compatibility. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading the precombined layer"); imageListLength=GetImageListLength(image); if ((psd_info.has_merged_image != MagickFalse) || (imageListLength == 1)) psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage( image_info,image,&psd_info,exception); if ((psd_info.has_merged_image == MagickFalse) && (imageListLength == 1) && (length != 0)) { (void) SeekBlob(image,offset,SEEK_SET); status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse, exception); if (status != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } } if (psd_info.has_merged_image == MagickFalse) { Image *merged; if (imageListLength == 1) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); } image->background_color.alpha=(MagickRealType) TransparentAlpha; image->background_color.alpha_trait=BlendPixelTrait; (void) SetImageBackgroundColor(image,exception); merged=MergeImageLayers(image,FlattenLayer,exception); ReplaceImageInList(&image,merged); } if (profile != (StringInfo *) NULL) { Image *next; i=0; next=image; while (next != (Image *) NULL) { if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse) (void) SetImageProfile(next,GetStringInfoName(profile),profile, exception); next=next->next; } profile=DestroyStringInfo(profile); } (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPSDImage() adds properties for the PSD image format to % the list of supported formats. 
The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterPSDImage method is: % % size_t RegisterPSDImage(void) % */ ModuleExport size_t RegisterPSDImage(void) { MagickInfo *entry; entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterPSDImage() removes format registrations made by the % PSD module from the list of supported formats. % % The format of the UnregisterPSDImage method is: % % UnregisterPSDImage(void) % */ ModuleExport void UnregisterPSDImage(void) { (void) UnregisterMagickInfo("PSB"); (void) UnregisterMagickInfo("PSD"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePSDImage() writes an image in the Adobe Photoshop encoded image format. % % The format of the WritePSDImage method is: % % MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % % o exception: return any errors or warnings in this structure. 
% */ static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image, const size_t offset) { if (psd_info->version == 1) return(WriteBlobMSBShort(image,(unsigned short) offset)); return(WriteBlobMSBLong(image,(unsigned int) offset)); } static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickOffsetType offset) { MagickOffsetType current_offset; ssize_t result; current_offset=TellBlob(image); (void) SeekBlob(image,offset,SEEK_SET); if (psd_info->version == 1) result=WriteBlobMSBShort(image,(unsigned short) size); else result=WriteBlobMSBLong(image,(unsigned int) size); (void) SeekBlob(image,current_offset,SEEK_SET); return(result); } static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size) { if (psd_info->version == 1) return(WriteBlobLong(image,(unsigned int) size)); return(WriteBlobLongLong(image,size)); } static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickOffsetType offset) { MagickOffsetType current_offset; ssize_t result; current_offset=TellBlob(image); (void) SeekBlob(image,offset,SEEK_SET); result=SetPSDSize(psd_info,image,size); (void) SeekBlob(image,current_offset,SEEK_SET); return(result); } static size_t PSDPackbitsEncodeImage(Image *image,const size_t length, const unsigned char *pixels,unsigned char *compact_pixels, ExceptionInfo *exception) { int count; register ssize_t i, j; register unsigned char *q; unsigned char *packbits; /* Compress pixels with Packbits encoding. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pixels != (unsigned char *) NULL); assert(compact_pixels != (unsigned char *) NULL); packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits)); if (packbits == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); q=compact_pixels; for (i=(ssize_t) length; i != 0; ) { switch (i) { case 1: { i--; *q++=(unsigned char) 0; *q++=(*pixels); break; } case 2: { i-=2; *q++=(unsigned char) 1; *q++=(*pixels); *q++=pixels[1]; break; } case 3: { i-=3; if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { *q++=(unsigned char) ((256-3)+1); *q++=(*pixels); break; } *q++=(unsigned char) 2; *q++=(*pixels); *q++=pixels[1]; *q++=pixels[2]; break; } default: { if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { /* Packed run. */ count=3; while (((ssize_t) count < i) && (*pixels == *(pixels+count))) { count++; if (count >= 127) break; } i-=count; *q++=(unsigned char) ((256-count)+1); *q++=(*pixels); pixels+=count; break; } /* Literal run. 
*/ count=0; while ((*(pixels+count) != *(pixels+count+1)) || (*(pixels+count+1) != *(pixels+count+2))) { packbits[count+1]=pixels[count]; count++; if (((ssize_t) count >= (i-3)) || (count >= 127)) break; } i-=count; *packbits=(unsigned char) (count-1); for (j=0; j <= (ssize_t) count; j++) *q++=packbits[j]; pixels+=count; break; } } } *q++=(unsigned char) 128; /* EOD marker */ packbits=(unsigned char *) RelinquishMagickMemory(packbits); return((size_t) (q-compact_pixels)); } static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image, const Image *next_image,const CompressionType compression, const ssize_t channels) { size_t length; ssize_t i, y; if (compression == RLECompression) { length=(size_t) WriteBlobShort(image,RLE); for (i=0; i < channels; i++) for (y=0; y < (ssize_t) next_image->rows; y++) length+=SetPSDOffset(psd_info,image,0); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (compression == ZipCompression) length=(size_t) WriteBlobShort(image,ZipWithoutPrediction); #endif else length=(size_t) WriteBlobShort(image,Raw); return(length); } static size_t WritePSDChannel(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, const QuantumType quantum_type, unsigned char *compact_pixels, MagickOffsetType size_offset,const MagickBooleanType separate, const CompressionType compression,ExceptionInfo *exception) { MagickBooleanType monochrome; QuantumInfo *quantum_info; register const Quantum *p; register ssize_t i; size_t count, length; ssize_t y; unsigned char *pixels; #ifdef MAGICKCORE_ZLIB_DELEGATE int flush, level; unsigned char *compressed_pixels; z_stream stream; compressed_pixels=(unsigned char *) NULL; flush=Z_NO_FLUSH; #endif count=0; if (separate != MagickFalse) { size_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,compression,1); } if (next_image->depth > 8) next_image->depth=16; monochrome=IsImageMonochrome(image) && (image->depth == 1) ? 
MagickTrue : MagickFalse; quantum_info=AcquireQuantumInfo(image_info,next_image); if (quantum_info == (QuantumInfo *) NULL) return(0); pixels=(unsigned char *) GetQuantumPixels(quantum_info); #ifdef MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { compressed_pixels=(unsigned char *) AcquireQuantumMemory( MagickMinBufferExtent,sizeof(*compressed_pixels)); if (compressed_pixels == (unsigned char *) NULL) { quantum_info=DestroyQuantumInfo(quantum_info); return(0); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; level=Z_DEFAULT_COMPRESSION; if ((image_info->quality > 0 && image_info->quality < 10)) level=(int) image_info->quality; if (deflateInit(&stream,level) != Z_OK) { quantum_info=DestroyQuantumInfo(quantum_info); compressed_pixels=(unsigned char *) RelinquishMagickMemory( compressed_pixels); return(0); } } #endif for (y=0; y < (ssize_t) next_image->rows; y++) { p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (monochrome != MagickFalse) for (i=0; i < (ssize_t) length; i++) pixels[i]=(~pixels[i]); if (compression == RLECompression) { length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels, exception); count+=WriteBlob(image,length,compact_pixels); size_offset+=WritePSDOffset(psd_info,image,length,size_offset); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (compression == ZipCompression) { stream.avail_in=(uInt) length; stream.next_in=(Bytef *) pixels; if (y == (ssize_t) next_image->rows-1) flush=Z_FINISH; do { stream.avail_out=(uInt) MagickMinBufferExtent; stream.next_out=(Bytef *) compressed_pixels; if (deflate(&stream,flush) == Z_STREAM_ERROR) break; length=(size_t) MagickMinBufferExtent-stream.avail_out; if (length > 0) count+=WriteBlob(image,length,compressed_pixels); } while (stream.avail_out == 0); } #endif else count+=WriteBlob(image,length,pixels); } #ifdef MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { (void) deflateEnd(&stream); compressed_pixels=(unsigned char *) RelinquishMagickMemory( compressed_pixels); } #endif quantum_info=DestroyQuantumInfo(quantum_info); return(count); } static unsigned char *AcquireCompactPixels(const Image *image, ExceptionInfo *exception) { size_t packet_size; unsigned char *compact_pixels; packet_size=image->depth > 8UL ? 
2UL : 1UL; compact_pixels=(unsigned char *) AcquireQuantumMemory((9* image->columns)+1,packet_size*sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); } return(compact_pixels); } static size_t WritePSDChannels(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, MagickOffsetType size_offset,const MagickBooleanType separate, ExceptionInfo *exception) { CompressionType compression; Image *mask; MagickOffsetType rows_offset; size_t channels, count, length, offset_length; unsigned char *compact_pixels; count=0; offset_length=0; rows_offset=0; compact_pixels=(unsigned char *) NULL; compression=next_image->compression; if (image_info->compression != UndefinedCompression) compression=image_info->compression; if (compression == RLECompression) { compact_pixels=AcquireCompactPixels(next_image,exception); if (compact_pixels == (unsigned char *) NULL) return(0); } channels=1; if (separate == MagickFalse) { if ((next_image->storage_class != PseudoClass) || (IsImageGray(next_image) != MagickFalse)) { if (IsImageGray(next_image) == MagickFalse) channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 : 3); if (next_image->alpha_trait != UndefinedPixelTrait) channels++; } rows_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,compression, (ssize_t) channels); offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4)); } size_offset+=2; if ((next_image->storage_class == PseudoClass) && (IsImageGray(next_image) == MagickFalse)) { length=WritePSDChannel(psd_info,image_info,image,next_image, IndexQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (IsImageGray(next_image) != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, GrayQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); length=WritePSDChannel(psd_info,image_info,image,next_image, RedQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, GreenQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, BlueQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; if (next_image->colorspace == CMYKColorspace) { length=WritePSDChannel(psd_info,image_info,image,next_image, BlackQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } if (next_image->alpha_trait != 
UndefinedPixelTrait) { length=WritePSDChannel(psd_info,image_info,image,next_image, AlphaQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); if (separate != MagickFalse) { const char *property; property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, exception); if (mask != (Image *) NULL) { if (compression == RLECompression) { compact_pixels=AcquireCompactPixels(mask,exception); if (compact_pixels == (unsigned char *) NULL) return(0); } length=WritePSDChannel(psd_info,image_info,image,mask, RedQuantum,compact_pixels,rows_offset,MagickTrue,compression, exception); (void) WritePSDSize(psd_info,image,length,size_offset); count+=length; compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); } } } return(count); } static size_t WritePascalString(Image *image,const char *value,size_t padding) { size_t count, length; register ssize_t i; /* Max length is 255. */ count=0; length=(strlen(value) > 255UL ) ? 255UL : strlen(value); if (length == 0) count+=WriteBlobByte(image,0); else { count+=WriteBlobByte(image,(unsigned char) length); count+=WriteBlob(image,length,(const unsigned char *) value); } length++; if ((length % padding) == 0) return(count); for (i=0; i < (ssize_t) (padding-(length % padding)); i++) count+=WriteBlobByte(image,0); return(count); } static void WriteResolutionResourceBlock(Image *image) { double x_resolution, y_resolution; unsigned short units; if (image->units == PixelsPerCentimeterResolution) { x_resolution=2.54*65536.0*image->resolution.x+0.5; y_resolution=2.54*65536.0*image->resolution.y+0.5; units=2; } else { x_resolution=65536.0*image->resolution.x+0.5; y_resolution=65536.0*image->resolution.y+0.5; units=1; } (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x03ED); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,16); /* resource size */ (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */ (void) WriteBlobMSBShort(image,units); /* width unit */ (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */ (void) WriteBlobMSBShort(image,units); /* height unit */ } static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image, const signed short channel) { size_t count; count=(size_t) WriteBlobShort(image,(const unsigned short) channel); count+=SetPSDSize(psd_info,image,0); return(count); } static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { register unsigned char *q; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); if 
(id == 0x0000040f) { ssize_t quantum; quantum=PSDQuantum(count)+12; if ((quantum >= 12) && (quantum < (ssize_t) length)) { if ((q+quantum < (datum+length-16))) (void) memmove(q,q+quantum,length-quantum-(q-datum)); SetStringInfoLength(bim_profile,length-quantum); } break; } p+=count; if ((count & 0x01) != 0) p++; } } static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { register unsigned char *q; ssize_t cnt; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) return; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); cnt=PSDQuantum(count); if (cnt < 0) return; if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) && ((ssize_t) length-(cnt+12)-(q-datum)) > 0) { (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum)); SetStringInfoLength(bim_profile,length-(cnt+12)); break; } p+=count; if ((count & 0x01) != 0) p++; } } static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { #define PSDKeySize 5 #define PSDAllowedLength 36 char key[PSDKeySize]; /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */ const char allowed[PSDAllowedLength][PSDKeySize] = { "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk", "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr", "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl", "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA" }, *option; const StringInfo *info; MagickBooleanType found; register size_t i; size_t remaining_length, length; StringInfo *profile; unsigned char *p; unsigned int size; info=GetImageProfile(image,"psd:additional-info"); if (info == (const StringInfo *) NULL) return((const StringInfo *) NULL); option=GetImageOption(image_info,"psd:additional-info"); if (LocaleCompare(option,"all") == 0) return(info); if (LocaleCompare(option,"selective") != 0) { profile=RemoveImageProfile(image,"psd:additional-info"); return(DestroyStringInfo(profile)); } length=GetStringInfoLength(info); p=GetStringInfoDatum(info); remaining_length=length; length=0; while (remaining_length >= 12) { /* skip over signature */ p+=4; key[0]=(char) (*p++); key[1]=(char) (*p++); key[2]=(char) (*p++); key[3]=(char) (*p++); key[4]='\0'; size=(unsigned int) (*p++) << 24; size|=(unsigned int) (*p++) << 16; size|=(unsigned int) (*p++) << 8; size|=(unsigned int) (*p++); size=size & 0xffffffff; remaining_length-=12; if ((size_t) size > remaining_length) return((const StringInfo *) NULL); found=MagickFalse; for (i=0; i < PSDAllowedLength; i++) { if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0) continue; found=MagickTrue; break; } remaining_length-=(size_t) size; if (found == MagickFalse) { if (remaining_length > 0) p=(unsigned char *) memmove(p-12,p+size,remaining_length); continue; } length+=(size_t) size+12; p+=size; } profile=RemoveImageProfile(image,"psd:additional-info"); if (length == 0) return(DestroyStringInfo(profile)); SetStringInfoLength(profile,(const size_t) length); (void) SetImageProfile(image,"psd:additional-info",info,exception); return(profile); } static 
MagickBooleanType WritePSDLayersInternal(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size, ExceptionInfo *exception) { char layer_name[MagickPathExtent]; const char *property; const StringInfo *info; Image *base_image, *next_image; MagickBooleanType status; MagickOffsetType *layer_size_offsets, size_offset; register ssize_t i; size_t layer_count, layer_index, length, name_length, rounded_size, size; status=MagickTrue; base_image=GetNextImageInList(image); if (base_image == (Image *) NULL) base_image=image; size=0; size_offset=TellBlob(image); (void) SetPSDSize(psd_info,image,0); layer_count=0; for (next_image=base_image; next_image != NULL; ) { layer_count++; next_image=GetNextImageInList(next_image); } if (image->alpha_trait != UndefinedPixelTrait) size+=WriteBlobShort(image,-(unsigned short) layer_count); else size+=WriteBlobShort(image,(unsigned short) layer_count); layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count,sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); layer_index=0; for (next_image=base_image; next_image != NULL; ) { Image *mask; unsigned char default_color; unsigned short channels, total_channels; mask=(Image *) NULL; property=GetImageArtifact(next_image,"psd:opacity-mask"); default_color=0; if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception); default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0); } size+=WriteBlobSignedLong(image,(signed int) next_image->page.y); size+=WriteBlobSignedLong(image,(signed int) next_image->page.x); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+ next_image->rows)); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+ next_image->columns)); channels=1; if ((next_image->storage_class != PseudoClass) && (IsImageGray(next_image) == MagickFalse)) channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 : 3); total_channels=channels; if (next_image->alpha_trait != UndefinedPixelTrait) total_channels++; if (mask != (Image *) NULL) total_channels++; size+=WriteBlobShort(image,total_channels); layer_size_offsets[layer_index++]=TellBlob(image); for (i=0; i < (ssize_t) channels; i++) size+=WriteChannelSize(psd_info,image,(signed short) i); if (next_image->alpha_trait != UndefinedPixelTrait) size+=WriteChannelSize(psd_info,image,-1); if (mask != (Image *) NULL) size+=WriteChannelSize(psd_info,image,-2); size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM"); size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image)); property=GetImageArtifact(next_image,"psd:layer.opacity"); if (property != (const char *) NULL) { Quantum opacity; opacity=(Quantum) StringToInteger(property); size+=WriteBlobByte(image,ScaleQuantumToChar(opacity)); (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception); } else size+=WriteBlobByte(image,255); size+=WriteBlobByte(image,0); size+=WriteBlobByte(image,(const unsigned char) (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. 
*/ size+=WriteBlobByte(image,0); info=GetAdditionalInformation(image_info,next_image,exception); property=(const char *) GetImageProperty(next_image,"label",exception); if (property == (const char *) NULL) { (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g", (double) layer_index); property=layer_name; } name_length=strlen(property)+1; if ((name_length % 4) != 0) name_length+=(4-(name_length % 4)); if (info != (const StringInfo *) NULL) name_length+=GetStringInfoLength(info); name_length+=8; if (mask != (Image *) NULL) name_length+=20; size+=WriteBlobLong(image,(unsigned int) name_length); if (mask == (Image *) NULL) size+=WriteBlobLong(image,0); else { if (mask->compose != NoCompositeOp) (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum( default_color),MagickTrue,exception); mask->page.y+=image->page.y; mask->page.x+=image->page.x; size+=WriteBlobLong(image,20); size+=WriteBlobSignedLong(image,(const signed int) mask->page.y); size+=WriteBlobSignedLong(image,(const signed int) mask->page.x); size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+ mask->page.y)); size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+ mask->page.x)); size+=WriteBlobByte(image,default_color); size+=WriteBlobByte(image,(const unsigned char) (mask->compose == NoCompositeOp ? 2 : 0)); size+=WriteBlobMSBShort(image,0); } size+=WriteBlobLong(image,0); size+=WritePascalString(image,property,4); if (info != (const StringInfo *) NULL) size+=WriteBlob(image,GetStringInfoLength(info), GetStringInfoDatum(info)); next_image=GetNextImageInList(next_image); } /* Now the image data! */ next_image=base_image; layer_index=0; while (next_image != NULL) { length=WritePSDChannels(psd_info,image_info,image,next_image, layer_size_offsets[layer_index++],MagickTrue,exception); if (length == 0) { status=MagickFalse; break; } size+=length; next_image=GetNextImageInList(next_image); } /* Write the total size */ if (layers_size != (size_t*) NULL) *layers_size=size; if ((size/2) != ((size+1)/2)) rounded_size=size+1; else rounded_size=size; (void) WritePSDSize(psd_info,image,rounded_size,size_offset); layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory( layer_size_offsets); /* Remove the opacity mask from the registry */ next_image=base_image; while (next_image != (Image *) NULL) { property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) (void) DeleteImageRegistry(property); next_image=GetNextImageInList(next_image); } return(status); } ModuleExport MagickBooleanType WritePSDLayers(Image * image, const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception) { PolicyDomain domain; PolicyRights rights; domain=CoderPolicyDomain; rights=WritePolicyRights; if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse) return(MagickTrue); return WritePSDLayersInternal(image,image_info,psd_info,(size_t*) NULL, exception); } static MagickBooleanType WritePSDImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { const StringInfo *icc_profile; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t length, num_channels, packet_size; StringInfo *bim_profile; /* Open image file. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); packet_size=(size_t) (image->depth > 8 ? 6 : 3); if (image->alpha_trait != UndefinedPixelTrait) packet_size+=image->depth > 8 ? 2 : 1; psd_info.version=1; if ((LocaleCompare(image_info->magick,"PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version=2; (void) WriteBlob(image,4,(const unsigned char *) "8BPS"); (void) WriteBlobMSBShort(image,psd_info.version); /* version */ for (i=1; i <= 6; i++) (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */ /* When the image has a color profile it won't be converted to gray scale */ if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) && (SetImageGray(image,exception) != MagickFalse)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorAlphaType) && (image->storage_class == PseudoClass)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void) SetImageStorageClass(image,DirectClass,exception); if (image->colorspace != CMYKColorspace) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL); else num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL); } (void) WriteBlobMSBShort(image,(unsigned short) num_channels); (void) WriteBlobMSBLong(image,(unsigned int) image->rows); (void) WriteBlobMSBLong(image,(unsigned int) image->columns); if (IsImageGray(image) != MagickFalse) { MagickBooleanType monochrome; /* Write depth & mode. */ monochrome=IsImageMonochrome(image) && (image->depth == 1) ? MagickTrue : MagickFalse; (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void) TransformImageColorspace(image,sRGBColorspace,exception); (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace,exception); (void) WriteBlobMSBShort(image,CMYKMode); } } if ((IsImageGray(image) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void) WriteBlobMSBLong(image,0); else { /* Write PSD raster colormap. 
*/ (void) WriteBlobMSBLong(image,768); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].red))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].green))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].blue))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); } /* Image resource block. */ length=28; /* 0x03EB */ bim_profile=(StringInfo *) GetImageProfile(image,"8bim"); icc_profile=GetImageProfile(image,"icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile=CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length+=PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *) NULL) length+=PSDQuantum(GetStringInfoLength(icc_profile))+12; (void) WriteBlobMSBLong(image,(unsigned int) length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void) WriteBlob(image,GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile=DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x0000040F); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength( icc_profile)); (void) WriteBlob(image,GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void) WriteBlobByte(image,0); } if (status != MagickFalse) { MagickOffsetType size_offset; size_t size; size_offset=TellBlob(image); (void) SetPSDSize(&psd_info,image,0); status=WritePSDLayersInternal(image,image_info,&psd_info,&size, exception); size_offset+=WritePSDSize(&psd_info,image,size+ (psd_info.version == 1 ? 8 : 12),size_offset); } (void) WriteBlobMSBLong(image,0); /* user mask data */ /* Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression=image->compression; if (image_info->compression != UndefinedCompression) image->compression=image_info->compression; if (image->compression == ZipCompression) image->compression=RLECompression; if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse, exception) == 0) status=MagickFalse; image->compression=compression; } (void) CloseBlob(image); return(status); }
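/*
  Illustrative only (not part of the coder): a minimal MagickCore client
  that exercises the PSD reader registered above.  The filename
  "layers.psd" is a placeholder; the block is guarded with #if 0 so it is
  excluded from compilation of this translation unit.
*/
#if 0
#include <stdio.h>
#include <MagickCore/MagickCore.h>

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *images;

  ImageInfo
    *image_info;

  (void) argc;
  MagickCoreGenesis(*argv,MagickFalse);
  exception=AcquireExceptionInfo();
  image_info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename,"layers.psd",MagickPathExtent);
  images=ReadImage(image_info,exception);  /* dispatches to ReadPSDImage() */
  if (images != (Image *) NULL)
    {
      (void) fprintf(stdout,"%.20g image(s) in list\n",(double)
        GetImageListLength(images));
      images=DestroyImageList(images);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}
#endif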
omp-low.c
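/* Illustrative sketch, not part of this file: for a simple parallel
   region such as

       #pragma omp parallel shared (a)
         a[omp_get_thread_num ()] = 1;

   the lowering implemented below conceptually produces an outlined
   child function plus explicit libgomp calls in the parent:

       struct .omp_data_s { int *a; } .omp_data_o;

       static void foo.omp_fn.0 (void *data)
       {
         struct .omp_data_s *.omp_data_i = data;
         .omp_data_i->a[omp_get_thread_num ()] = 1;
       }

       .omp_data_o.a = a;
       GOMP_parallel_start (foo.omp_fn.0, &.omp_data_o, 0);
       foo.omp_fn.0 (&.omp_data_o);
       GOMP_parallel_end ();

   The exact internal names and runtime entry points vary by construct;
   this is only meant to orient the reader.  */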
/* Lowering pass for OpenMP directives. Converts OpenMP directives into explicit calls to the runtime library (libgomp) and data marshalling to implement data sharing and copying clauses. Contributed by Diego Novillo <dnovillo@redhat.com> Copyright (C) 2005, 2006 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "rtl.h" #include "tree-gimple.h" #include "tree-inline.h" #include "langhooks.h" #include "diagnostic.h" #include "tree-flow.h" #include "timevar.h" #include "flags.h" #include "function.h" #include "expr.h" #include "toplev.h" #include "tree-pass.h" #include "ggc.h" #include "except.h" /* Lowering of OpenMP parallel and workshare constructs proceeds in two phases. The first phase scans the function looking for OMP statements and then for variables that must be replaced to satisfy data sharing clauses. The second phase expands code for the constructs, as well as re-gimplifying things when variables have been replaced with complex expressions. Final code generation is done by pass_expand_omp. The flowgraph is scanned for parallel regions which are then moved to a new function, to be invoked by the thread library. */ /* Context structure. Used to store information about each parallel directive in the code. */ typedef struct omp_context { /* This field must be at the beginning, as we do "inheritance": Some callback functions for tree-inline.c (e.g., omp_copy_decl) receive a copy_body_data pointer that is up-casted to an omp_context pointer. */ copy_body_data cb; /* The tree of contexts corresponding to the encountered constructs. */ struct omp_context *outer; tree stmt; /* Map variables to fields in a structure that allows communication between sending and receiving threads. */ splay_tree field_map; tree record_type; tree sender_decl; tree receiver_decl; /* A chain of variables to add to the top-level block surrounding the construct. In the case of a parallel, this is in the child function. */ tree block_vars; /* What to do with variables with implicitly determined sharing attributes. */ enum omp_clause_default_kind default_kind; /* Nesting depth of this context. Used to beautify error messages re invalid gotos. The outermost ctx is depth 1, with depth 0 being reserved for the main body of the function. */ int depth; /* True if this parallel directive is nested within another. */ bool is_nested; } omp_context; /* A structure describing the main elements of a parallel loop. 
*/ struct omp_for_data { tree v, n1, n2, step, chunk_size, for_stmt; enum tree_code cond_code; tree pre; bool have_nowait, have_ordered; enum omp_clause_schedule_kind sched_kind; }; static splay_tree all_contexts; static int parallel_nesting_level; struct omp_region *root_omp_region; static void scan_omp (tree *, omp_context *); static void lower_omp (tree *, omp_context *); static tree lookup_decl_in_outer_ctx (tree, omp_context *); static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *); /* Find an OpenMP clause of type KIND within CLAUSES. */ static tree find_omp_clause (tree clauses, enum tree_code kind) { for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses)) if (OMP_CLAUSE_CODE (clauses) == kind) return clauses; return NULL_TREE; } /* Return true if CTX is for an omp parallel. */ static inline bool is_parallel_ctx (omp_context *ctx) { return TREE_CODE (ctx->stmt) == OMP_PARALLEL; } /* Return true if REGION is a combined parallel+workshare region. */ static inline bool is_combined_parallel (struct omp_region *region) { return region->is_combined_parallel; } /* Extract the header elements of parallel loop FOR_STMT and store them into *FD. */ static void extract_omp_for_data (tree for_stmt, struct omp_for_data *fd) { tree t; fd->for_stmt = for_stmt; fd->pre = NULL; t = OMP_FOR_INIT (for_stmt); gcc_assert (TREE_CODE (t) == MODIFY_EXPR); fd->v = TREE_OPERAND (t, 0); gcc_assert (DECL_P (fd->v)); gcc_assert (TREE_CODE (TREE_TYPE (fd->v)) == INTEGER_TYPE); fd->n1 = TREE_OPERAND (t, 1); t = OMP_FOR_COND (for_stmt); fd->cond_code = TREE_CODE (t); gcc_assert (TREE_OPERAND (t, 0) == fd->v); fd->n2 = TREE_OPERAND (t, 1); switch (fd->cond_code) { case LT_EXPR: case GT_EXPR: break; case LE_EXPR: fd->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->n2), fd->n2, build_int_cst (TREE_TYPE (fd->n2), 1)); fd->cond_code = LT_EXPR; break; case GE_EXPR: fd->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->n2), fd->n2, build_int_cst (TREE_TYPE (fd->n2), 1)); fd->cond_code = GT_EXPR; break; default: gcc_unreachable (); } t = OMP_FOR_INCR (fd->for_stmt); gcc_assert (TREE_CODE (t) == MODIFY_EXPR); gcc_assert (TREE_OPERAND (t, 0) == fd->v); t = TREE_OPERAND (t, 1); gcc_assert (TREE_OPERAND (t, 0) == fd->v); switch (TREE_CODE (t)) { case PLUS_EXPR: fd->step = TREE_OPERAND (t, 1); break; case MINUS_EXPR: fd->step = TREE_OPERAND (t, 1); fd->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (fd->step), fd->step); break; default: gcc_unreachable (); } fd->have_nowait = fd->have_ordered = false; fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC; fd->chunk_size = NULL_TREE; for (t = OMP_FOR_CLAUSES (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t)) switch (OMP_CLAUSE_CODE (t)) { case OMP_CLAUSE_NOWAIT: fd->have_nowait = true; break; case OMP_CLAUSE_ORDERED: fd->have_ordered = true; break; case OMP_CLAUSE_SCHEDULE: fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t); fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t); break; default: break; } if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME) gcc_assert (fd->chunk_size == NULL); else if (fd->chunk_size == NULL) { /* We only need to compute a default chunk size for ordered static loops and dynamic loops. */ if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC || fd->have_ordered) fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC) ? 
	  integer_zero_node : integer_one_node;
    }
}


/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   OMP_FOR regions.  In some cases, these arguments are computed out
   of variables passed in from the parent to the child via
   'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   OMP_FOR header and check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block par_entry_bb,
			     basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  tree par_stmt, ws_stmt;

  par_stmt = last_stmt (par_entry_bb);
  ws_stmt = last_stmt (ws_entry_bb);

  if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
    return true;

  gcc_assert (TREE_CODE (ws_stmt) == OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd);

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.n1)
      || !is_gimple_min_invariant (fd.n2)
      || !is_gimple_min_invariant (fd.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}


/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static tree
get_ws_args_for (tree ws_stmt)
{
  tree t;

  if (TREE_CODE (ws_stmt) == OMP_FOR)
    {
      struct omp_for_data fd;
      tree ws_args;

      extract_omp_for_data (ws_stmt, &fd);

      ws_args = NULL_TREE;
      if (fd.chunk_size)
	{
	  t = fold_convert (long_integer_type_node, fd.chunk_size);
	  ws_args = tree_cons (NULL, t, ws_args);
	}

      t = fold_convert (long_integer_type_node, fd.step);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.n2);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.n1);
      ws_args = tree_cons (NULL, t, ws_args);

      return ws_args;
    }
  else if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
    {
      basic_block bb = bb_for_stmt (ws_stmt);
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs));
      t = tree_cons (NULL, t, NULL);
      return t;
    }

  gcc_unreachable ();
}


/* Discover whether REGION is a combined parallel+workshare region.
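
   Roughly, for instance, '#pragma omp parallel for schedule (dynamic)'
   that passes workshare_safe_to_combine_p above is later emitted by
   expand_parallel_call as a single GOMP_parallel_loop_dynamic_start
   call instead of GOMP_parallel_start followed by
   GOMP_loop_dynamic_start.  Loops with a static or unspecified
   schedule, and ordered loops, are not combined (see below).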
*/ static void determine_parallel_type (struct omp_region *region) { basic_block par_entry_bb, par_exit_bb; basic_block ws_entry_bb, ws_exit_bb; if (region == NULL || region->inner == NULL || region->exit == NULL || region->inner->exit == NULL) return; /* We only support parallel+for and parallel+sections. */ if (region->type != OMP_PARALLEL || (region->inner->type != OMP_FOR && region->inner->type != OMP_SECTIONS)) return; /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and WS_EXIT_BB -> PAR_EXIT_BB. */ par_entry_bb = region->entry; par_exit_bb = region->exit; ws_entry_bb = region->inner->entry; ws_exit_bb = region->inner->exit; if (single_succ (par_entry_bb) == ws_entry_bb && single_succ (ws_exit_bb) == par_exit_bb && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb) && (OMP_PARALLEL_COMBINED (last_stmt (par_entry_bb)) || (last_and_only_stmt (ws_entry_bb) && last_and_only_stmt (par_exit_bb)))) { tree ws_stmt = last_stmt (ws_entry_bb); if (region->inner->type == OMP_FOR) { /* If this is a combined parallel loop, we need to determine whether or not to use the combined library calls. There are two cases where we do not apply the transformation: static loops and any kind of ordered loop. In the first case, we already open code the loop so there is no need to do anything else. In the latter case, the combined parallel loop call would still need extra synchronization to implement ordered semantics, so there would not be any gain in using the combined call. */ tree clauses = OMP_FOR_CLAUSES (ws_stmt); tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE); if (c == NULL || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC || find_omp_clause (clauses, OMP_CLAUSE_ORDERED)) { region->is_combined_parallel = false; region->inner->is_combined_parallel = false; return; } } region->is_combined_parallel = true; region->inner->is_combined_parallel = true; region->ws_args = get_ws_args_for (ws_stmt); } } /* Return true if EXPR is variable sized. */ static inline bool is_variable_sized (tree expr) { return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr))); } /* Return true if DECL is a reference type. */ static inline bool is_reference (tree decl) { return lang_hooks.decls.omp_privatize_by_reference (decl); } /* Lookup variables in the decl or field splay trees. The "maybe" form allows for the variable form to not have been entered, otherwise we assert that the variable must have been entered. */ static inline tree lookup_decl (tree var, omp_context *ctx) { splay_tree_node n; n = splay_tree_lookup (ctx->cb.decl_map, (splay_tree_key) var); return (tree) n->value; } static inline tree maybe_lookup_decl (tree var, omp_context *ctx) { splay_tree_node n; n = splay_tree_lookup (ctx->cb.decl_map, (splay_tree_key) var); return n ? (tree) n->value : NULL_TREE; } static inline tree lookup_field (tree var, omp_context *ctx) { splay_tree_node n; n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var); return (tree) n->value; } static inline tree maybe_lookup_field (tree var, omp_context *ctx) { splay_tree_node n; n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var); return n ? (tree) n->value : NULL_TREE; } /* Return true if DECL should be copied by pointer. SHARED_P is true if DECL is to be shared. */ static bool use_pointer_for_field (tree decl, bool shared_p) { if (AGGREGATE_TYPE_P (TREE_TYPE (decl))) return true; /* We can only use copy-in/copy-out semantics for shared variables when we know the value is not accessible from an outer scope. */ if (shared_p) { /* ??? 
Trivially accessible from anywhere. But why would we even be passing an address in this case? Should we simply assert this to be false, or should we have a cleanup pass that removes these from the list of mappings? */ if (TREE_STATIC (decl) || DECL_EXTERNAL (decl)) return true; /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell without analyzing the expression whether or not its location is accessible to anyone else. In the case of nested parallel regions it certainly may be. */ if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl)) return true; /* Do not use copy-in/copy-out for variables that have their address taken. */ if (TREE_ADDRESSABLE (decl)) return true; } return false; } /* Construct a new automatic decl similar to VAR. */ static tree omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx) { tree copy = build_decl (VAR_DECL, name, type); TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var); DECL_COMPLEX_GIMPLE_REG_P (copy) = DECL_COMPLEX_GIMPLE_REG_P (var); DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var); DECL_IGNORED_P (copy) = DECL_IGNORED_P (var); TREE_USED (copy) = 1; DECL_CONTEXT (copy) = current_function_decl; DECL_SEEN_IN_BIND_EXPR_P (copy) = 1; TREE_CHAIN (copy) = ctx->block_vars; ctx->block_vars = copy; return copy; } static tree omp_copy_decl_1 (tree var, omp_context *ctx) { return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx); } /* Build tree nodes to access the field for VAR on the receiver side. */ static tree build_receiver_ref (tree var, bool by_ref, omp_context *ctx) { tree x, field = lookup_field (var, ctx); /* If the receiver record type was remapped in the child function, remap the field into the new record type. */ x = maybe_lookup_field (field, ctx); if (x != NULL) field = x; x = build_fold_indirect_ref (ctx->receiver_decl); x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL); if (by_ref) x = build_fold_indirect_ref (x); return x; } /* Build tree nodes to access VAR in the scope outer to CTX. In the case of a parallel, this is a component reference; for workshare constructs this is some variable. */ static tree build_outer_var_ref (tree var, omp_context *ctx) { tree x; if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx))) x = var; else if (is_variable_sized (var)) { x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0); x = build_outer_var_ref (x, ctx); x = build_fold_indirect_ref (x); } else if (is_parallel_ctx (ctx)) { bool by_ref = use_pointer_for_field (var, false); x = build_receiver_ref (var, by_ref, ctx); } else if (ctx->outer) x = lookup_decl (var, ctx->outer); else if (is_reference (var)) /* This can happen with orphaned constructs. If var is reference, it is possible it is shared and as such valid. */ x = var; else gcc_unreachable (); if (is_reference (var)) x = build_fold_indirect_ref (x); return x; } /* Build tree nodes to access the field for VAR on the sender side. */ static tree build_sender_ref (tree var, omp_context *ctx) { tree field = lookup_field (var, ctx); return build3 (COMPONENT_REF, TREE_TYPE (field), ctx->sender_decl, field, NULL); } /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */ static void install_var_field (tree var, bool by_ref, omp_context *ctx) { tree field, type; gcc_assert (!splay_tree_lookup (ctx->field_map, (splay_tree_key) var)); type = TREE_TYPE (var); if (by_ref) type = build_pointer_type (type); field = build_decl (FIELD_DECL, DECL_NAME (var), type); /* Remember what variable this field was created for. 
This does have a side effect of making dwarf2out ignore this member, so for helpful debugging we clear it later in delete_omp_context. */ DECL_ABSTRACT_ORIGIN (field) = var; insert_field_into_struct (ctx->record_type, field); splay_tree_insert (ctx->field_map, (splay_tree_key) var, (splay_tree_value) field); } static tree install_var_local (tree var, omp_context *ctx) { tree new_var = omp_copy_decl_1 (var, ctx); insert_decl_map (&ctx->cb, var, new_var); return new_var; } /* Adjust the replacement for DECL in CTX for the new context. This means copying the DECL_VALUE_EXPR, and fixing up the type. */ static void fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug) { tree new_decl, size; new_decl = lookup_decl (decl, ctx); TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb); if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug) && DECL_HAS_VALUE_EXPR_P (decl)) { tree ve = DECL_VALUE_EXPR (decl); walk_tree (&ve, copy_body_r, &ctx->cb, NULL); SET_DECL_VALUE_EXPR (new_decl, ve); DECL_HAS_VALUE_EXPR_P (new_decl) = 1; } if (!TREE_CONSTANT (DECL_SIZE (new_decl))) { size = remap_decl (DECL_SIZE (decl), &ctx->cb); if (size == error_mark_node) size = TYPE_SIZE (TREE_TYPE (new_decl)); DECL_SIZE (new_decl) = size; size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb); if (size == error_mark_node) size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl)); DECL_SIZE_UNIT (new_decl) = size; } } /* The callback for remap_decl. Search all containing contexts for a mapping of the variable; this avoids having to duplicate the splay tree ahead of time. We know a mapping doesn't already exist in the given context. Create new mappings to implement default semantics. */ static tree omp_copy_decl (tree var, copy_body_data *cb) { omp_context *ctx = (omp_context *) cb; tree new_var; if (TREE_CODE (var) == LABEL_DECL) { new_var = create_artificial_label (); DECL_CONTEXT (new_var) = current_function_decl; insert_decl_map (&ctx->cb, var, new_var); return new_var; } while (!is_parallel_ctx (ctx)) { ctx = ctx->outer; if (ctx == NULL) return var; new_var = maybe_lookup_decl (var, ctx); if (new_var) return new_var; } if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn) return var; return error_mark_node; } /* Return the parallel region associated with STMT. */ /* Debugging dumps for parallel regions. */ void dump_omp_region (FILE *, struct omp_region *, int); void debug_omp_region (struct omp_region *); void debug_all_omp_regions (void); /* Dump the parallel region tree rooted at REGION. */ void dump_omp_region (FILE *file, struct omp_region *region, int indent) { fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index, tree_code_name[region->type]); if (region->inner) dump_omp_region (file, region->inner, indent + 4); if (region->cont) { fprintf (file, "%*sbb %d: OMP_CONTINUE\n", indent, "", region->cont->index); } if (region->exit) fprintf (file, "%*sbb %d: OMP_RETURN\n", indent, "", region->exit->index); else fprintf (file, "%*s[no exit marker]\n", indent, ""); if (region->next) dump_omp_region (file, region->next, indent); } void debug_omp_region (struct omp_region *region) { dump_omp_region (stderr, region, 0); } void debug_all_omp_regions (void) { dump_omp_region (stderr, root_omp_region, 0); } /* Create a new parallel region starting at STMT inside region PARENT. 
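The regions form a tree that mirrors the nesting of the directives: root_omp_region chains the outermost regions through their NEXT fields, while each region's INNER list holds the constructs nested immediately inside it (dump_omp_region above prints this tree).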
*/ struct omp_region * new_omp_region (basic_block bb, enum tree_code type, struct omp_region *parent) { struct omp_region *region = xcalloc (1, sizeof (*region)); region->outer = parent; region->entry = bb; region->type = type; if (parent) { /* This is a nested region. Add it to the list of inner regions in PARENT. */ region->next = parent->inner; parent->inner = region; } else { /* This is a toplevel region. Add it to the list of toplevel regions in ROOT_OMP_REGION. */ region->next = root_omp_region; root_omp_region = region; } return region; } /* Release the memory associated with the region tree rooted at REGION. */ static void free_omp_region_1 (struct omp_region *region) { struct omp_region *i, *n; for (i = region->inner; i ; i = n) { n = i->next; free_omp_region_1 (i); } free (region); } /* Release the memory for the entire omp region tree. */ void free_omp_regions (void) { struct omp_region *r, *n; for (r = root_omp_region; r ; r = n) { n = r->next; free_omp_region_1 (r); } root_omp_region = NULL; } /* Create a new context, with OUTER_CTX being the surrounding context. */ static omp_context * new_omp_context (tree stmt, omp_context *outer_ctx) { omp_context *ctx = XCNEW (omp_context); splay_tree_insert (all_contexts, (splay_tree_key) stmt, (splay_tree_value) ctx); ctx->stmt = stmt; if (outer_ctx) { ctx->outer = outer_ctx; ctx->cb = outer_ctx->cb; ctx->cb.block = NULL; ctx->depth = outer_ctx->depth + 1; } else { ctx->cb.src_fn = current_function_decl; ctx->cb.dst_fn = current_function_decl; ctx->cb.src_node = cgraph_node (current_function_decl); ctx->cb.dst_node = ctx->cb.src_node; ctx->cb.src_cfun = cfun; ctx->cb.copy_decl = omp_copy_decl; ctx->cb.eh_region = -1; ctx->cb.transform_call_graph_edges = CB_CGE_MOVE; ctx->depth = 1; } ctx->cb.decl_map = splay_tree_new (splay_tree_compare_pointers, 0, 0); return ctx; } /* Destroy a omp_context data structures. Called through the splay tree value delete callback. */ static void delete_omp_context (splay_tree_value value) { omp_context *ctx = (omp_context *) value; splay_tree_delete (ctx->cb.decl_map); if (ctx->field_map) splay_tree_delete (ctx->field_map); /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before it produces corrupt debug information. */ if (ctx->record_type) { tree t; for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t)) DECL_ABSTRACT_ORIGIN (t) = NULL; } XDELETE (ctx); } /* Fix up RECEIVER_DECL with a type that has been remapped to the child context. */ static void fixup_child_record_type (omp_context *ctx) { tree f, type = ctx->record_type; /* ??? It isn't sufficient to just call remap_type here, because variably_modified_type_p doesn't work the way we expect for record types. Testing each field for whether it needs remapping and creating a new record by hand works, however. */ for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f)) if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn)) break; if (f) { tree name, new_fields = NULL; type = lang_hooks.types.make_type (RECORD_TYPE); name = DECL_NAME (TYPE_NAME (ctx->record_type)); name = build_decl (TYPE_DECL, name, type); TYPE_NAME (type) = name; for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f)) { tree new_f = copy_node (f); DECL_CONTEXT (new_f) = type; TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb); TREE_CHAIN (new_f) = new_fields; new_fields = new_f; /* Arrange to be able to look up the receiver field given the sender field. 
*/ splay_tree_insert (ctx->field_map, (splay_tree_key) f, (splay_tree_value) new_f); } TYPE_FIELDS (type) = nreverse (new_fields); layout_type (type); } TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type); } /* Instantiate decls as necessary in CTX to satisfy the data sharing specified by CLAUSES. */ static void scan_sharing_clauses (tree clauses, omp_context *ctx) { tree c, decl; bool scan_array_reductions = false; for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) { bool by_ref; switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_PRIVATE: decl = OMP_CLAUSE_DECL (c); if (!is_variable_sized (decl)) install_var_local (decl, ctx); break; case OMP_CLAUSE_SHARED: gcc_assert (is_parallel_ctx (ctx)); decl = OMP_CLAUSE_DECL (c); gcc_assert (!is_variable_sized (decl)); by_ref = use_pointer_for_field (decl, true); /* Global variables don't need to be copied, the receiver side will use them directly. */ if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))) break; if (! TREE_READONLY (decl) || TREE_ADDRESSABLE (decl) || by_ref || is_reference (decl)) { install_var_field (decl, by_ref, ctx); install_var_local (decl, ctx); break; } /* We don't need to copy const scalar vars back. */ OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE); goto do_private; case OMP_CLAUSE_LASTPRIVATE: /* Let the corresponding firstprivate clause create the variable. */ if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)) break; /* FALLTHRU */ case OMP_CLAUSE_FIRSTPRIVATE: case OMP_CLAUSE_REDUCTION: decl = OMP_CLAUSE_DECL (c); do_private: if (is_variable_sized (decl)) break; else if (is_parallel_ctx (ctx) && ! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))) { by_ref = use_pointer_for_field (decl, false); install_var_field (decl, by_ref, ctx); } install_var_local (decl, ctx); break; case OMP_CLAUSE_COPYPRIVATE: if (ctx->outer) scan_omp (&OMP_CLAUSE_DECL (c), ctx->outer); /* FALLTHRU */ case OMP_CLAUSE_COPYIN: decl = OMP_CLAUSE_DECL (c); by_ref = use_pointer_for_field (decl, false); install_var_field (decl, by_ref, ctx); break; case OMP_CLAUSE_DEFAULT: ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c); break; case OMP_CLAUSE_IF: case OMP_CLAUSE_NUM_THREADS: case OMP_CLAUSE_SCHEDULE: if (ctx->outer) scan_omp (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer); break; case OMP_CLAUSE_NOWAIT: case OMP_CLAUSE_ORDERED: break; default: gcc_unreachable (); } } for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) { switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_LASTPRIVATE: /* Let the corresponding firstprivate clause create the variable. */ if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)) break; /* FALLTHRU */ case OMP_CLAUSE_PRIVATE: case OMP_CLAUSE_FIRSTPRIVATE: case OMP_CLAUSE_REDUCTION: decl = OMP_CLAUSE_DECL (c); if (is_variable_sized (decl)) install_var_local (decl, ctx); fixup_remapped_decl (decl, ctx, OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE && OMP_CLAUSE_PRIVATE_DEBUG (c)); if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) scan_array_reductions = true; break; case OMP_CLAUSE_SHARED: decl = OMP_CLAUSE_DECL (c); if (! 
is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))) fixup_remapped_decl (decl, ctx, false); break; case OMP_CLAUSE_COPYPRIVATE: case OMP_CLAUSE_COPYIN: case OMP_CLAUSE_DEFAULT: case OMP_CLAUSE_IF: case OMP_CLAUSE_NUM_THREADS: case OMP_CLAUSE_SCHEDULE: case OMP_CLAUSE_NOWAIT: case OMP_CLAUSE_ORDERED: break; default: gcc_unreachable (); } } if (scan_array_reductions) for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) { scan_omp (&OMP_CLAUSE_REDUCTION_INIT (c), ctx); scan_omp (&OMP_CLAUSE_REDUCTION_MERGE (c), ctx); } } /* Create a new name for omp child function. Returns an identifier. */ static GTY(()) unsigned int tmp_ompfn_id_num; static tree create_omp_child_function_name (void) { tree name = DECL_ASSEMBLER_NAME (current_function_decl); size_t len = IDENTIFIER_LENGTH (name); char *tmp_name, *prefix; prefix = alloca (len + sizeof ("_omp_fn")); memcpy (prefix, IDENTIFIER_POINTER (name), len); strcpy (prefix + len, "_omp_fn"); #ifndef NO_DOT_IN_LABEL prefix[len] = '.'; #elif !defined NO_DOLLAR_IN_LABEL prefix[len] = '$'; #endif ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++); return get_identifier (tmp_name); } /* Build a decl for the omp child function. It'll not contain a body yet, just the bare decl. */ static void create_omp_child_function (omp_context *ctx) { tree decl, type, name, t; name = create_omp_child_function_name (); type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE); decl = build_decl (FUNCTION_DECL, name, type); decl = lang_hooks.decls.pushdecl (decl); ctx->cb.dst_fn = decl; TREE_STATIC (decl) = 1; TREE_USED (decl) = 1; DECL_ARTIFICIAL (decl) = 1; DECL_IGNORED_P (decl) = 0; TREE_PUBLIC (decl) = 0; DECL_UNINLINABLE (decl) = 1; DECL_EXTERNAL (decl) = 0; DECL_CONTEXT (decl) = NULL_TREE; DECL_INITIAL (decl) = make_node (BLOCK); t = build_decl (RESULT_DECL, NULL_TREE, void_type_node); DECL_ARTIFICIAL (t) = 1; DECL_IGNORED_P (t) = 1; DECL_RESULT (decl) = t; t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node); DECL_ARTIFICIAL (t) = 1; DECL_ARG_TYPE (t) = ptr_type_node; DECL_CONTEXT (t) = current_function_decl; TREE_USED (t) = 1; DECL_ARGUMENTS (decl) = t; ctx->receiver_decl = t; /* Allocate memory for the function structure. The call to allocate_struct_function clobbers CFUN, so we need to restore it afterward. */ allocate_struct_function (decl); DECL_SOURCE_LOCATION (decl) = EXPR_LOCATION (ctx->stmt); cfun->function_end_locus = EXPR_LOCATION (ctx->stmt); cfun = ctx->cb.src_cfun; } /* Scan an OpenMP parallel directive. */ static void scan_omp_parallel (tree *stmt_p, omp_context *outer_ctx) { omp_context *ctx; tree name; /* Ignore parallel directives with empty bodies, unless there are copyin clauses. 
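(Even with an empty body, a copyin clause still has a visible effect: the threadprivate copies in the spawned threads must be updated from the master's values, so such a directive cannot simply be dropped.)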
*/ if (optimize > 0 && empty_body_p (OMP_PARALLEL_BODY (*stmt_p)) && find_omp_clause (OMP_CLAUSES (*stmt_p), OMP_CLAUSE_COPYIN) == NULL) { *stmt_p = build_empty_stmt (); return; } ctx = new_omp_context (*stmt_p, outer_ctx); if (parallel_nesting_level > 1) ctx->is_nested = true; ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0); ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED; ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE); name = create_tmp_var_name (".omp_data_s"); name = build_decl (TYPE_DECL, name, ctx->record_type); TYPE_NAME (ctx->record_type) = name; create_omp_child_function (ctx); OMP_PARALLEL_FN (*stmt_p) = ctx->cb.dst_fn; scan_sharing_clauses (OMP_PARALLEL_CLAUSES (*stmt_p), ctx); scan_omp (&OMP_PARALLEL_BODY (*stmt_p), ctx); if (TYPE_FIELDS (ctx->record_type) == NULL) ctx->record_type = ctx->receiver_decl = NULL; else { layout_type (ctx->record_type); fixup_child_record_type (ctx); } } /* Scan an OpenMP loop directive. */ static void scan_omp_for (tree *stmt_p, omp_context *outer_ctx) { omp_context *ctx; tree stmt; stmt = *stmt_p; ctx = new_omp_context (stmt, outer_ctx); scan_sharing_clauses (OMP_FOR_CLAUSES (stmt), ctx); scan_omp (&OMP_FOR_PRE_BODY (stmt), ctx); scan_omp (&OMP_FOR_INIT (stmt), ctx); scan_omp (&OMP_FOR_COND (stmt), ctx); scan_omp (&OMP_FOR_INCR (stmt), ctx); scan_omp (&OMP_FOR_BODY (stmt), ctx); } /* Scan an OpenMP sections directive. */ static void scan_omp_sections (tree *stmt_p, omp_context *outer_ctx) { tree stmt; omp_context *ctx; stmt = *stmt_p; ctx = new_omp_context (stmt, outer_ctx); scan_sharing_clauses (OMP_SECTIONS_CLAUSES (stmt), ctx); scan_omp (&OMP_SECTIONS_BODY (stmt), ctx); } /* Scan an OpenMP single directive. */ static void scan_omp_single (tree *stmt_p, omp_context *outer_ctx) { tree stmt = *stmt_p; omp_context *ctx; tree name; ctx = new_omp_context (stmt, outer_ctx); ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0); ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE); name = create_tmp_var_name (".omp_copy_s"); name = build_decl (TYPE_DECL, name, ctx->record_type); TYPE_NAME (ctx->record_type) = name; scan_sharing_clauses (OMP_SINGLE_CLAUSES (stmt), ctx); scan_omp (&OMP_SINGLE_BODY (stmt), ctx); if (TYPE_FIELDS (ctx->record_type) == NULL) ctx->record_type = NULL; else layout_type (ctx->record_type); } /* Check OpenMP nesting restrictions. 
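Examples of what is diagnosed below (as warnings, not hard errors): a worksharing construct such as '#pragma omp for' closely nested inside another worksharing, master or ordered region; a master region inside a worksharing region; an ordered region inside a loop that lacks an ordered clause; and a critical region nested inside a critical region of the same name.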
*/ static void check_omp_nesting_restrictions (tree t, omp_context *ctx) { switch (TREE_CODE (t)) { case OMP_FOR: case OMP_SECTIONS: case OMP_SINGLE: for (; ctx != NULL; ctx = ctx->outer) switch (TREE_CODE (ctx->stmt)) { case OMP_FOR: case OMP_SECTIONS: case OMP_SINGLE: case OMP_ORDERED: case OMP_MASTER: warning (0, "work-sharing region may not be closely nested inside " "of work-sharing, critical, ordered or master region"); return; case OMP_PARALLEL: return; default: break; } break; case OMP_MASTER: for (; ctx != NULL; ctx = ctx->outer) switch (TREE_CODE (ctx->stmt)) { case OMP_FOR: case OMP_SECTIONS: case OMP_SINGLE: warning (0, "master region may not be closely nested inside " "of work-sharing region"); return; case OMP_PARALLEL: return; default: break; } break; case OMP_ORDERED: for (; ctx != NULL; ctx = ctx->outer) switch (TREE_CODE (ctx->stmt)) { case OMP_CRITICAL: warning (0, "ordered region may not be closely nested inside " "of critical region"); return; case OMP_FOR: if (find_omp_clause (OMP_CLAUSES (ctx->stmt), OMP_CLAUSE_ORDERED) == NULL) warning (0, "ordered region must be closely nested inside " "a loop region with an ordered clause"); return; case OMP_PARALLEL: return; default: break; } break; case OMP_CRITICAL: for (; ctx != NULL; ctx = ctx->outer) if (TREE_CODE (ctx->stmt) == OMP_CRITICAL && OMP_CRITICAL_NAME (t) == OMP_CRITICAL_NAME (ctx->stmt)) { warning (0, "critical region may not be nested inside a critical " "region with the same name"); return; } break; default: break; } } /* Callback for walk_stmts used to scan for OpenMP directives at TP. */ static tree scan_omp_1 (tree *tp, int *walk_subtrees, void *data) { struct walk_stmt_info *wi = data; omp_context *ctx = wi->info; tree t = *tp; if (EXPR_HAS_LOCATION (t)) input_location = EXPR_LOCATION (t); /* Check the OpenMP nesting restrictions. */ if (OMP_DIRECTIVE_P (t) && ctx != NULL) check_omp_nesting_restrictions (t, ctx); *walk_subtrees = 0; switch (TREE_CODE (t)) { case OMP_PARALLEL: parallel_nesting_level++; scan_omp_parallel (tp, ctx); parallel_nesting_level--; break; case OMP_FOR: scan_omp_for (tp, ctx); break; case OMP_SECTIONS: scan_omp_sections (tp, ctx); break; case OMP_SINGLE: scan_omp_single (tp, ctx); break; case OMP_SECTION: case OMP_MASTER: case OMP_ORDERED: case OMP_CRITICAL: ctx = new_omp_context (*tp, ctx); scan_omp (&OMP_BODY (*tp), ctx); break; case BIND_EXPR: { tree var; *walk_subtrees = 1; for (var = BIND_EXPR_VARS (t); var ; var = TREE_CHAIN (var)) insert_decl_map (&ctx->cb, var, var); } break; case VAR_DECL: case PARM_DECL: case LABEL_DECL: case RESULT_DECL: if (ctx) *tp = remap_decl (t, &ctx->cb); break; default: if (ctx && TYPE_P (t)) *tp = remap_type (t, &ctx->cb); else if (!DECL_P (t)) *walk_subtrees = 1; break; } return NULL_TREE; } /* Scan all the statements starting at STMT_P. CTX contains context information about the OpenMP directives and clauses found during the scan. */ static void scan_omp (tree *stmt_p, omp_context *ctx) { location_t saved_location; struct walk_stmt_info wi; memset (&wi, 0, sizeof (wi)); wi.callback = scan_omp_1; wi.info = ctx; wi.want_bind_expr = (ctx != NULL); wi.want_locations = true; saved_location = input_location; walk_stmts (&wi, stmt_p); input_location = saved_location; } /* Re-gimplification and code generation routines. */ /* Build a call to GOMP_barrier. 
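The call is simply appended to *STMT_LIST; lower_rec_input_clauses uses it, for example, to keep the master thread from modifying a by-reference copyin variable before the other threads have copied it.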
*/ static void build_omp_barrier (tree *stmt_list) { tree t; t = built_in_decls[BUILT_IN_GOMP_BARRIER]; t = build_function_call_expr (t, NULL); gimplify_and_add (t, stmt_list); } /* If a context was created for STMT when it was scanned, return it. */ static omp_context * maybe_lookup_ctx (tree stmt) { splay_tree_node n; n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt); return n ? (omp_context *) n->value : NULL; } /* Find the mapping for DECL in CTX or the immediately enclosing context that has a mapping for DECL. If CTX is a nested parallel directive, we may have to use the decl mappings created in CTX's parent context. Suppose that we have the following parallel nesting (variable UIDs showed for clarity): iD.1562 = 0; #omp parallel shared(iD.1562) -> outer parallel iD.1562 = iD.1562 + 1; #omp parallel shared (iD.1562) -> inner parallel iD.1562 = iD.1562 - 1; Each parallel structure will create a distinct .omp_data_s structure for copying iD.1562 in/out of the directive: outer parallel .omp_data_s.1.i -> iD.1562 inner parallel .omp_data_s.2.i -> iD.1562 A shared variable mapping will produce a copy-out operation before the parallel directive and a copy-in operation after it. So, in this case we would have: iD.1562 = 0; .omp_data_o.1.i = iD.1562; #omp parallel shared(iD.1562) -> outer parallel .omp_data_i.1 = &.omp_data_o.1 .omp_data_i.1->i = .omp_data_i.1->i + 1; .omp_data_o.2.i = iD.1562; -> ** #omp parallel shared(iD.1562) -> inner parallel .omp_data_i.2 = &.omp_data_o.2 .omp_data_i.2->i = .omp_data_i.2->i - 1; ** This is a problem. The symbol iD.1562 cannot be referenced inside the body of the outer parallel region. But since we are emitting this copy operation while expanding the inner parallel directive, we need to access the CTX structure of the outer parallel directive to get the correct mapping: .omp_data_o.2.i = .omp_data_i.1->i Since there may be other workshare or parallel directives enclosing the parallel directive, it may be necessary to walk up the context parent chain. This is not a problem in general because nested parallelism happens only rarely. */ static tree lookup_decl_in_outer_ctx (tree decl, omp_context *ctx) { tree t; omp_context *up; gcc_assert (ctx->is_nested); for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer) t = maybe_lookup_decl (decl, up); gcc_assert (t || is_global_var (decl)); return t ? t : decl; } /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found in outer contexts. */ static tree maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx) { tree t = NULL; omp_context *up; if (ctx->is_nested) for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer) t = maybe_lookup_decl (decl, up); return t ? t : decl; } /* Construct the initialization value for reduction CLAUSE. 
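Roughly: PLUS, MINUS, IOR, XOR and the logical-or codes start at 0; MULT and the logical-and codes start at 1; BIT_AND starts at ~0; MAX and MIN start at the minimum/maximum value of TYPE (or -Inf/+Inf for floats when infinities are honored).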
*/ tree omp_reduction_init (tree clause, tree type) { switch (OMP_CLAUSE_REDUCTION_CODE (clause)) { case PLUS_EXPR: case MINUS_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR: case TRUTH_XOR_EXPR: case NE_EXPR: return fold_convert (type, integer_zero_node); case MULT_EXPR: case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR: case EQ_EXPR: return fold_convert (type, integer_one_node); case BIT_AND_EXPR: return fold_convert (type, integer_minus_one_node); case MAX_EXPR: if (SCALAR_FLOAT_TYPE_P (type)) { REAL_VALUE_TYPE max, min; if (HONOR_INFINITIES (TYPE_MODE (type))) { real_inf (&max); real_arithmetic (&min, NEGATE_EXPR, &max, NULL); } else real_maxval (&min, 1, TYPE_MODE (type)); return build_real (type, min); } else { gcc_assert (INTEGRAL_TYPE_P (type)); return TYPE_MIN_VALUE (type); } case MIN_EXPR: if (SCALAR_FLOAT_TYPE_P (type)) { REAL_VALUE_TYPE max; if (HONOR_INFINITIES (TYPE_MODE (type))) real_inf (&max); else real_maxval (&max, 0, TYPE_MODE (type)); return build_real (type, max); } else { gcc_assert (INTEGRAL_TYPE_P (type)); return TYPE_MAX_VALUE (type); } default: gcc_unreachable (); } } /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN, from the receiver (aka child) side and initializers for REFERENCE_TYPE private variables. Initialization statements go in ILIST, while calls to destructors go in DLIST. */ static void lower_rec_input_clauses (tree clauses, tree *ilist, tree *dlist, omp_context *ctx) { tree_stmt_iterator diter; tree c, dtor, copyin_seq, x, args, ptr; bool copyin_by_ref = false; bool lastprivate_firstprivate = false; int pass; *dlist = alloc_stmt_list (); diter = tsi_start (*dlist); copyin_seq = NULL; /* Do all the fixed sized types in the first pass, and the variable sized types in the second pass. This makes sure that the scalar arguments to the variable sized types are processed before we use them in the variable sized operations. */ for (pass = 0; pass < 2; ++pass) { for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c)) { enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c); tree var, new_var; bool by_ref; switch (c_kind) { case OMP_CLAUSE_PRIVATE: if (OMP_CLAUSE_PRIVATE_DEBUG (c)) continue; break; case OMP_CLAUSE_SHARED: if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL) { gcc_assert (is_global_var (OMP_CLAUSE_DECL (c))); continue; } case OMP_CLAUSE_FIRSTPRIVATE: case OMP_CLAUSE_COPYIN: case OMP_CLAUSE_REDUCTION: break; case OMP_CLAUSE_LASTPRIVATE: if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)) { lastprivate_firstprivate = true; if (pass != 0) continue; } break; default: continue; } new_var = var = OMP_CLAUSE_DECL (c); if (c_kind != OMP_CLAUSE_COPYIN) new_var = lookup_decl (var, ctx); if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN) { if (pass != 0) continue; } else if (is_variable_sized (var)) { /* For variable sized types, we need to allocate the actual storage here. Call alloca and store the result in the pointer decl that we created elsewhere. 
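(That pointer decl is the base of the DECL_VALUE_EXPR installed for the privatized variable, so after the store below every use of the variable goes through the freshly allocated block.)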
*/ if (pass == 0) continue; ptr = DECL_VALUE_EXPR (new_var); gcc_assert (TREE_CODE (ptr) == INDIRECT_REF); ptr = TREE_OPERAND (ptr, 0); gcc_assert (DECL_P (ptr)); x = TYPE_SIZE_UNIT (TREE_TYPE (new_var)); args = tree_cons (NULL, x, NULL); x = built_in_decls[BUILT_IN_ALLOCA]; x = build_function_call_expr (x, args); x = fold_convert (TREE_TYPE (ptr), x); x = build2 (MODIFY_EXPR, void_type_node, ptr, x); gimplify_and_add (x, ilist); } else if (is_reference (var)) { /* For references that are being privatized for Fortran, allocate new backing storage for the new pointer variable. This allows us to avoid changing all the code that expects a pointer to something that expects a direct variable. Note that this doesn't apply to C++, since reference types are disallowed in data sharing clauses there, except for NRV optimized return values. */ if (pass == 0) continue; x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var))); if (TREE_CONSTANT (x)) { const char *name = NULL; if (DECL_NAME (var)) name = IDENTIFIER_POINTER (DECL_NAME (new_var)); x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)), name); gimple_add_tmp_var (x); x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var)); } else { args = tree_cons (NULL, x, NULL); x = built_in_decls[BUILT_IN_ALLOCA]; x = build_function_call_expr (x, args); x = fold_convert (TREE_TYPE (new_var), x); } x = build2 (MODIFY_EXPR, void_type_node, new_var, x); gimplify_and_add (x, ilist); new_var = build_fold_indirect_ref (new_var); } else if (c_kind == OMP_CLAUSE_REDUCTION && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) { if (pass == 0) continue; } else if (pass != 0) continue; switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_SHARED: /* Shared global vars are just accessed directly. */ if (is_global_var (new_var)) break; /* Set up the DECL_VALUE_EXPR for shared variables now. This needs to be delayed until after fixup_child_record_type so that we get the correct type during the dereference. */ by_ref = use_pointer_for_field (var, true); x = build_receiver_ref (var, by_ref, ctx); SET_DECL_VALUE_EXPR (new_var, x); DECL_HAS_VALUE_EXPR_P (new_var) = 1; /* ??? If VAR is not passed by reference, and the variable hasn't been initialized yet, then we'll get a warning for the store into the omp_data_s structure. Ideally, we'd be able to notice this and not store anything at all, but we're generating code too early. Suppress the warning. 
*/ if (!by_ref) TREE_NO_WARNING (var) = 1; break; case OMP_CLAUSE_LASTPRIVATE: if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)) break; /* FALLTHRU */ case OMP_CLAUSE_PRIVATE: x = lang_hooks.decls.omp_clause_default_ctor (c, new_var); if (x) gimplify_and_add (x, ilist); /* FALLTHRU */ do_dtor: x = lang_hooks.decls.omp_clause_dtor (c, new_var); if (x) { dtor = x; gimplify_stmt (&dtor); tsi_link_before (&diter, dtor, TSI_SAME_STMT); } break; case OMP_CLAUSE_FIRSTPRIVATE: x = build_outer_var_ref (var, ctx); x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x); gimplify_and_add (x, ilist); goto do_dtor; break; case OMP_CLAUSE_COPYIN: by_ref = use_pointer_for_field (var, false); x = build_receiver_ref (var, by_ref, ctx); x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x); append_to_statement_list (x, &copyin_seq); copyin_by_ref |= by_ref; break; case OMP_CLAUSE_REDUCTION: if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) { gimplify_and_add (OMP_CLAUSE_REDUCTION_INIT (c), ilist); OMP_CLAUSE_REDUCTION_INIT (c) = NULL; } else { x = omp_reduction_init (c, TREE_TYPE (new_var)); gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE); x = build2 (MODIFY_EXPR, void_type_node, new_var, x); gimplify_and_add (x, ilist); } break; default: gcc_unreachable (); } } } /* The copyin sequence is not to be executed by the main thread, since that would result in self-copies. Perhaps not visible to scalars, but it certainly is to C++ operator=. */ if (copyin_seq) { x = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM]; x = build_function_call_expr (x, NULL); x = build2 (NE_EXPR, boolean_type_node, x, build_int_cst (TREE_TYPE (x), 0)); x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL); gimplify_and_add (x, ilist); } /* If any copyin variable is passed by reference, we must ensure the master thread doesn't modify it before it is copied over in all threads. Similarly for variables in both firstprivate and lastprivate clauses we need to ensure the lastprivate copying happens after firstprivate copying in all threads. */ if (copyin_by_ref || lastprivate_firstprivate) build_omp_barrier (ilist); } /* Generate code to implement the LASTPRIVATE clauses. This is used for both parallel and workshare constructs. PREDICATE may be NULL if it's always true. */ static void lower_lastprivate_clauses (tree clauses, tree predicate, tree *stmt_list, omp_context *ctx) { tree sub_list, x, c; /* Early exit if there are no lastprivate clauses. */ clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE); if (clauses == NULL) { /* If this was a workshare clause, see if it had been combined with its parallel. In that case, look for the clauses on the parallel statement itself. */ if (is_parallel_ctx (ctx)) return; ctx = ctx->outer; if (ctx == NULL || !is_parallel_ctx (ctx)) return; clauses = find_omp_clause (OMP_PARALLEL_CLAUSES (ctx->stmt), OMP_CLAUSE_LASTPRIVATE); if (clauses == NULL) return; } sub_list = alloc_stmt_list (); for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c)) { tree var, new_var; if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LASTPRIVATE) continue; var = OMP_CLAUSE_DECL (c); new_var = lookup_decl (var, ctx); x = build_outer_var_ref (var, ctx); if (is_reference (var)) new_var = build_fold_indirect_ref (new_var); x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var); append_to_statement_list (x, &sub_list); } if (predicate) x = build3 (COND_EXPR, void_type_node, predicate, sub_list, NULL); else x = sub_list; gimplify_and_add (x, stmt_list); } /* Generate code to implement the REDUCTION clauses. 
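As a sketch of the strategy used below: a single scalar clause such as 'reduction (+:sum)' folds the thread-local partial result into the shared variable with one OMP_ATOMIC update; several reduction clauses, or an array/placeholder reduction, are instead wrapped between GOMP_atomic_start and GOMP_atomic_end calls.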
*/ static void lower_reduction_clauses (tree clauses, tree *stmt_list, omp_context *ctx) { tree sub_list = NULL, x, c; int count = 0; /* First see if there is exactly one reduction clause. Use OMP_ATOMIC update in that case, otherwise use a lock. */ for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION) { if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) { /* Never use OMP_ATOMIC for array reductions. */ count = -1; break; } count++; } if (count == 0) return; for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c)) { tree var, ref, new_var; enum tree_code code; if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION) continue; var = OMP_CLAUSE_DECL (c); new_var = lookup_decl (var, ctx); if (is_reference (var)) new_var = build_fold_indirect_ref (new_var); ref = build_outer_var_ref (var, ctx); code = OMP_CLAUSE_REDUCTION_CODE (c); /* reduction(-:var) sums up the partial results, so it acts identically to reduction(+:var). */ if (code == MINUS_EXPR) code = PLUS_EXPR; if (count == 1) { tree addr = build_fold_addr_expr (ref); addr = save_expr (addr); ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr); x = fold_build2 (code, TREE_TYPE (ref), ref, new_var); x = build2 (OMP_ATOMIC, void_type_node, addr, x); gimplify_and_add (x, stmt_list); return; } if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) { tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c); if (is_reference (var)) ref = build_fold_addr_expr (ref); SET_DECL_VALUE_EXPR (placeholder, ref); DECL_HAS_VALUE_EXPR_P (placeholder) = 1; gimplify_and_add (OMP_CLAUSE_REDUCTION_MERGE (c), &sub_list); OMP_CLAUSE_REDUCTION_MERGE (c) = NULL; OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL; } else { x = build2 (code, TREE_TYPE (ref), ref, new_var); ref = build_outer_var_ref (var, ctx); x = build2 (MODIFY_EXPR, void_type_node, ref, x); append_to_statement_list (x, &sub_list); } } x = built_in_decls[BUILT_IN_GOMP_ATOMIC_START]; x = build_function_call_expr (x, NULL); gimplify_and_add (x, stmt_list); gimplify_and_add (sub_list, stmt_list); x = built_in_decls[BUILT_IN_GOMP_ATOMIC_END]; x = build_function_call_expr (x, NULL); gimplify_and_add (x, stmt_list); } /* Generate code to implement the COPYPRIVATE clauses. */ static void lower_copyprivate_clauses (tree clauses, tree *slist, tree *rlist, omp_context *ctx) { tree c; for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c)) { tree var, ref, x; bool by_ref; if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE) continue; var = OMP_CLAUSE_DECL (c); by_ref = use_pointer_for_field (var, false); ref = build_sender_ref (var, ctx); x = (ctx->is_nested) ? lookup_decl_in_outer_ctx (var, ctx) : var; x = by_ref ? build_fold_addr_expr (x) : x; x = build2 (MODIFY_EXPR, void_type_node, ref, x); gimplify_and_add (x, slist); ref = build_receiver_ref (var, by_ref, ctx); if (is_reference (var)) { ref = build_fold_indirect_ref (ref); var = build_fold_indirect_ref (var); } x = lang_hooks.decls.omp_clause_assign_op (c, var, ref); gimplify_and_add (x, rlist); } } /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE, and REDUCTION from the sender (aka parent) side. 
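For example, with '#pragma omp parallel firstprivate (a)' the parent stores A (or &A when the field is a pointer) into the matching field of the .omp_data_o structure before spawning the threads, while lastprivate and reduction results are read back out of the structure into the original variables afterwards.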
*/ static void lower_send_clauses (tree clauses, tree *ilist, tree *olist, omp_context *ctx) { tree c; for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c)) { tree val, ref, x, var; bool by_ref, do_in = false, do_out = false; switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_FIRSTPRIVATE: case OMP_CLAUSE_COPYIN: case OMP_CLAUSE_LASTPRIVATE: case OMP_CLAUSE_REDUCTION: break; default: continue; } var = val = OMP_CLAUSE_DECL (c); if (ctx->is_nested) var = lookup_decl_in_outer_ctx (val, ctx); if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN && is_global_var (var)) continue; if (is_variable_sized (val)) continue; by_ref = use_pointer_for_field (val, false); switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_FIRSTPRIVATE: case OMP_CLAUSE_COPYIN: do_in = true; break; case OMP_CLAUSE_LASTPRIVATE: if (by_ref || is_reference (val)) { if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)) continue; do_in = true; } else do_out = true; break; case OMP_CLAUSE_REDUCTION: do_in = true; do_out = !(by_ref || is_reference (val)); break; default: gcc_unreachable (); } if (do_in) { ref = build_sender_ref (val, ctx); x = by_ref ? build_fold_addr_expr (var) : var; x = build2 (MODIFY_EXPR, void_type_node, ref, x); gimplify_and_add (x, ilist); } if (do_out) { ref = build_sender_ref (val, ctx); x = build2 (MODIFY_EXPR, void_type_node, var, ref); gimplify_and_add (x, olist); } } } /* Generate code to implement SHARED from the sender (aka parent) side. This is trickier, since OMP_PARALLEL_CLAUSES doesn't list things that got automatically shared. */ static void lower_send_shared_vars (tree *ilist, tree *olist, omp_context *ctx) { tree var, ovar, nvar, f, x; if (ctx->record_type == NULL) return; for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f)) { ovar = DECL_ABSTRACT_ORIGIN (f); nvar = maybe_lookup_decl (ovar, ctx); if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar)) continue; var = ovar; /* If CTX is a nested parallel directive. Find the immediately enclosing parallel or workshare construct that contains a mapping for OVAR. */ if (ctx->is_nested) var = lookup_decl_in_outer_ctx (ovar, ctx); if (use_pointer_for_field (ovar, true)) { x = build_sender_ref (ovar, ctx); var = build_fold_addr_expr (var); x = build2 (MODIFY_EXPR, void_type_node, x, var); gimplify_and_add (x, ilist); } else { x = build_sender_ref (ovar, ctx); x = build2 (MODIFY_EXPR, void_type_node, x, var); gimplify_and_add (x, ilist); x = build_sender_ref (ovar, ctx); x = build2 (MODIFY_EXPR, void_type_node, var, x); gimplify_and_add (x, olist); } } } /* Build the function calls to GOMP_parallel_start etc to actually generate the parallel operation. REGION is the parallel region being expanded. BB is the block where to insert the code. WS_ARGS will be set if this is a call to a combined parallel+workshare construct, it contains the list of additional arguments needed by the workshare construct. */ static void expand_parallel_call (struct omp_region *region, basic_block bb, tree entry_stmt, tree ws_args) { tree t, args, val, cond, c, list, clauses; block_stmt_iterator si; int start_ix; clauses = OMP_PARALLEL_CLAUSES (entry_stmt); push_gimplify_context (); /* Determine what flavor of GOMP_parallel_start we will be emitting. 
*/ start_ix = BUILT_IN_GOMP_PARALLEL_START; if (is_combined_parallel (region)) { switch (region->inner->type) { case OMP_FOR: start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START + region->inner->sched_kind; break; case OMP_SECTIONS: start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START; break; default: gcc_unreachable (); } } /* By default, the value of NUM_THREADS is zero (selected at run time) and there is no conditional. */ cond = NULL_TREE; val = build_int_cst (unsigned_type_node, 0); c = find_omp_clause (clauses, OMP_CLAUSE_IF); if (c) cond = OMP_CLAUSE_IF_EXPR (c); c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS); if (c) val = OMP_CLAUSE_NUM_THREADS_EXPR (c); /* Ensure 'val' is of the correct type. */ val = fold_convert (unsigned_type_node, val); /* If we found the clause 'if (cond)', build either (cond != 0) or (cond ? val : 1u). */ if (cond) { block_stmt_iterator si; cond = gimple_boolify (cond); if (integer_zerop (val)) val = build2 (EQ_EXPR, unsigned_type_node, cond, build_int_cst (TREE_TYPE (cond), 0)); else { basic_block cond_bb, then_bb, else_bb; edge e; tree t, then_lab, else_lab, tmp; tmp = create_tmp_var (TREE_TYPE (val), NULL); e = split_block (bb, NULL); cond_bb = e->src; bb = e->dest; remove_edge (e); then_bb = create_empty_bb (cond_bb); else_bb = create_empty_bb (then_bb); then_lab = create_artificial_label (); else_lab = create_artificial_label (); t = build3 (COND_EXPR, void_type_node, cond, build_and_jump (&then_lab), build_and_jump (&else_lab)); si = bsi_start (cond_bb); bsi_insert_after (&si, t, BSI_CONTINUE_LINKING); si = bsi_start (then_bb); t = build1 (LABEL_EXPR, void_type_node, then_lab); bsi_insert_after (&si, t, BSI_CONTINUE_LINKING); t = build2 (MODIFY_EXPR, void_type_node, tmp, val); bsi_insert_after (&si, t, BSI_CONTINUE_LINKING); si = bsi_start (else_bb); t = build1 (LABEL_EXPR, void_type_node, else_lab); bsi_insert_after (&si, t, BSI_CONTINUE_LINKING); t = build2 (MODIFY_EXPR, void_type_node, tmp, build_int_cst (unsigned_type_node, 1)); bsi_insert_after (&si, t, BSI_CONTINUE_LINKING); make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE); make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE); make_edge (then_bb, bb, EDGE_FALLTHRU); make_edge (else_bb, bb, EDGE_FALLTHRU); val = tmp; } list = NULL_TREE; val = get_formal_tmp_var (val, &list); si = bsi_start (bb); bsi_insert_after (&si, list, BSI_CONTINUE_LINKING); } list = NULL_TREE; args = tree_cons (NULL, val, NULL); t = OMP_PARALLEL_DATA_ARG (entry_stmt); if (t == NULL) t = null_pointer_node; else t = build_fold_addr_expr (t); args = tree_cons (NULL, t, args); t = build_fold_addr_expr (OMP_PARALLEL_FN (entry_stmt)); args = tree_cons (NULL, t, args); if (ws_args) args = chainon (args, ws_args); t = built_in_decls[start_ix]; t = build_function_call_expr (t, args); gimplify_and_add (t, &list); t = OMP_PARALLEL_DATA_ARG (entry_stmt); if (t == NULL) t = null_pointer_node; else t = build_fold_addr_expr (t); args = tree_cons (NULL, t, NULL); t = build_function_call_expr (OMP_PARALLEL_FN (entry_stmt), args); gimplify_and_add (t, &list); t = built_in_decls[BUILT_IN_GOMP_PARALLEL_END]; t = build_function_call_expr (t, NULL); gimplify_and_add (t, &list); si = bsi_last (bb); bsi_insert_after (&si, list, BSI_CONTINUE_LINKING); pop_gimplify_context (NULL_TREE); } /* If exceptions are enabled, wrap *STMT_P in a MUST_NOT_THROW catch handler. This prevents programs from violating the structured block semantics with throws. 
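A throw that would escape the block reaches the EH_FILTER_EXPR built below, whose failure action is the language's cleanup hook when one is provided (terminate for C++) or a call to __builtin_trap otherwise.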
*/ static void maybe_catch_exception (tree *stmt_p) { tree f, t; if (!flag_exceptions) return; if (lang_protect_cleanup_actions) t = lang_protect_cleanup_actions (); else { t = built_in_decls[BUILT_IN_TRAP]; t = build_function_call_expr (t, NULL); } f = build2 (EH_FILTER_EXPR, void_type_node, NULL, NULL); EH_FILTER_MUST_NOT_THROW (f) = 1; gimplify_and_add (t, &EH_FILTER_FAILURE (f)); t = build2 (TRY_CATCH_EXPR, void_type_node, *stmt_p, NULL); append_to_statement_list (f, &TREE_OPERAND (t, 1)); *stmt_p = NULL; append_to_statement_list (t, stmt_p); } /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */ static tree list2chain (tree list) { tree t; for (t = list; t; t = TREE_CHAIN (t)) { tree var = TREE_VALUE (t); if (TREE_CHAIN (t)) TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t)); else TREE_CHAIN (var) = NULL_TREE; } return list ? TREE_VALUE (list) : NULL_TREE; } /* Remove barriers in REGION->EXIT's block. Note that this is only valid for OMP_PARALLEL regions. Since the end of a parallel region is an implicit barrier, any workshare inside the OMP_PARALLEL that left a barrier at the end of the OMP_PARALLEL region can now be removed. */ static void remove_exit_barrier (struct omp_region *region) { block_stmt_iterator si; basic_block exit_bb; edge_iterator ei; edge e; tree t; exit_bb = region->exit; /* If the parallel region doesn't return, we don't have REGION->EXIT block at all. */ if (! exit_bb) return; /* The last insn in the block will be the parallel's OMP_RETURN. The workshare's OMP_RETURN will be in a preceding block. The kinds of statements that can appear in between are extremely limited -- no memory operations at all. Here, we allow nothing at all, so the only thing we allow to precede this OMP_RETURN is a label. */ si = bsi_last (exit_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN); bsi_prev (&si); if (!bsi_end_p (si) && TREE_CODE (bsi_stmt (si)) != LABEL_EXPR) return; FOR_EACH_EDGE (e, ei, exit_bb->preds) { si = bsi_last (e->src); if (bsi_end_p (si)) continue; t = bsi_stmt (si); if (TREE_CODE (t) == OMP_RETURN) OMP_RETURN_NOWAIT (t) = 1; } } static void remove_exit_barriers (struct omp_region *region) { if (region->type == OMP_PARALLEL) remove_exit_barrier (region); if (region->inner) { region = region->inner; remove_exit_barriers (region); while (region->next) { region = region->next; remove_exit_barriers (region); } } } /* Expand the OpenMP parallel directive starting at REGION. */ static void expand_omp_parallel (struct omp_region *region) { basic_block entry_bb, exit_bb, new_bb; struct function *child_cfun, *saved_cfun; tree child_fn, block, t, ws_args; block_stmt_iterator si; tree entry_stmt; edge e; bool do_cleanup_cfg = false; entry_stmt = last_stmt (region->entry); child_fn = OMP_PARALLEL_FN (entry_stmt); child_cfun = DECL_STRUCT_FUNCTION (child_fn); saved_cfun = cfun; entry_bb = region->entry; exit_bb = region->exit; if (is_combined_parallel (region)) ws_args = region->ws_args; else ws_args = NULL_TREE; if (child_cfun->cfg) { /* Due to inlining, it may happen that we have already outlined the region, in which case all we need to do is make the sub-graph unreachable and emit the parallel call. 
*/ edge entry_succ_e, exit_succ_e; block_stmt_iterator si; entry_succ_e = single_succ_edge (entry_bb); si = bsi_last (entry_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_PARALLEL); bsi_remove (&si, true); new_bb = entry_bb; remove_edge (entry_succ_e); if (exit_bb) { exit_succ_e = single_succ_edge (exit_bb); make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU); } do_cleanup_cfg = true; } else { /* If the parallel region needs data sent from the parent function, then the very first statement (except possible tree profile counter updates) of the parallel body is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since &.OMP_DATA_O is passed as an argument to the child function, we need to replace it with the argument as seen by the child function. In most cases, this will end up being the identity assignment .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had a function call that has been inlined, the original PARM_DECL .OMP_DATA_I may have been converted into a different local variable. In which case, we need to keep the assignment. */ if (OMP_PARALLEL_DATA_ARG (entry_stmt)) { basic_block entry_succ_bb = single_succ (entry_bb); block_stmt_iterator si; for (si = bsi_start (entry_succ_bb); ; bsi_next (&si)) { tree stmt, arg; gcc_assert (!bsi_end_p (si)); stmt = bsi_stmt (si); if (TREE_CODE (stmt) != MODIFY_EXPR) continue; arg = TREE_OPERAND (stmt, 1); STRIP_NOPS (arg); if (TREE_CODE (arg) == ADDR_EXPR && TREE_OPERAND (arg, 0) == OMP_PARALLEL_DATA_ARG (entry_stmt)) { if (TREE_OPERAND (stmt, 0) == DECL_ARGUMENTS (child_fn)) bsi_remove (&si, true); else TREE_OPERAND (stmt, 1) = DECL_ARGUMENTS (child_fn); break; } } } /* Declare local variables needed in CHILD_CFUN. */ block = DECL_INITIAL (child_fn); BLOCK_VARS (block) = list2chain (child_cfun->unexpanded_var_list); DECL_SAVED_TREE (child_fn) = single_succ (entry_bb)->stmt_list; /* Reset DECL_CONTEXT on locals and function arguments. */ for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t)) DECL_CONTEXT (t) = child_fn; for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t)) DECL_CONTEXT (t) = child_fn; /* Split ENTRY_BB at OMP_PARALLEL so that it can be moved to the child function. */ si = bsi_last (entry_bb); t = bsi_stmt (si); gcc_assert (t && TREE_CODE (t) == OMP_PARALLEL); bsi_remove (&si, true); e = split_block (entry_bb, t); entry_bb = e->dest; single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU; /* Move the parallel region into CHILD_CFUN. We need to reset dominance information because the expansion of the inner regions has invalidated it. */ free_dominance_info (CDI_DOMINATORS); new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb); if (exit_bb) single_succ_edge (new_bb)->flags = EDGE_FALLTHRU; cgraph_add_new_function (child_fn); /* Convert OMP_RETURN into a RETURN_EXPR. */ if (exit_bb) { si = bsi_last (exit_bb); gcc_assert (!bsi_end_p (si) && TREE_CODE (bsi_stmt (si)) == OMP_RETURN); t = build1 (RETURN_EXPR, void_type_node, NULL); bsi_insert_after (&si, t, BSI_SAME_STMT); bsi_remove (&si, true); } } /* Emit a library call to launch the children threads. */ expand_parallel_call (region, new_bb, entry_stmt, ws_args); if (do_cleanup_cfg) { /* Clean up the unreachable sub-graph we created above. */ free_dominance_info (CDI_DOMINATORS); free_dominance_info (CDI_POST_DOMINATORS); cleanup_tree_cfg (); } } /* A subroutine of expand_omp_for. Generate code for a parallel loop with any schedule. 
Given parameters: for (V = N1; V cond N2; V += STEP) BODY; where COND is "<" or ">", we generate pseudocode more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0); if (more) goto L0; else goto L3; L0: V = istart0; iend = iend0; L1: BODY; V += STEP; if (V cond iend) goto L1; else goto L2; L2: if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3; L3: If this is a combined omp parallel loop, instead of the call to GOMP_loop_foo_start, we emit 'goto L3'. */ static void expand_omp_for_generic (struct omp_region *region, struct omp_for_data *fd, enum built_in_function start_fn, enum built_in_function next_fn) { tree l0, l1, l2 = NULL, l3 = NULL; tree type, istart0, iend0, iend; tree t, args, list; basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb; basic_block l2_bb = NULL, l3_bb = NULL; block_stmt_iterator si; bool in_combined_parallel = is_combined_parallel (region); type = TREE_TYPE (fd->v); istart0 = create_tmp_var (long_integer_type_node, ".istart0"); iend0 = create_tmp_var (long_integer_type_node, ".iend0"); iend = create_tmp_var (type, NULL); TREE_ADDRESSABLE (istart0) = 1; TREE_ADDRESSABLE (iend0) = 1; gcc_assert ((region->cont != NULL) ^ (region->exit == NULL)); entry_bb = region->entry; l0_bb = create_empty_bb (entry_bb); l1_bb = single_succ (entry_bb); l0 = tree_block_label (l0_bb); l1 = tree_block_label (l1_bb); cont_bb = region->cont; exit_bb = region->exit; if (cont_bb) { l2_bb = create_empty_bb (cont_bb); l3_bb = single_succ (cont_bb); l2 = tree_block_label (l2_bb); l3 = tree_block_label (l3_bb); } si = bsi_last (entry_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR); if (!in_combined_parallel) { /* If this is not a combined parallel loop, emit a call to GOMP_loop_foo_start in ENTRY_BB. */ list = alloc_stmt_list (); t = build_fold_addr_expr (iend0); args = tree_cons (NULL, t, NULL); t = build_fold_addr_expr (istart0); args = tree_cons (NULL, t, args); if (fd->chunk_size) { t = fold_convert (long_integer_type_node, fd->chunk_size); args = tree_cons (NULL, t, args); } t = fold_convert (long_integer_type_node, fd->step); args = tree_cons (NULL, t, args); t = fold_convert (long_integer_type_node, fd->n2); args = tree_cons (NULL, t, args); t = fold_convert (long_integer_type_node, fd->n1); args = tree_cons (NULL, t, args); t = build_function_call_expr (built_in_decls[start_fn], args); t = get_formal_tmp_var (t, &list); if (cont_bb) { t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l0), build_and_jump (&l3)); append_to_statement_list (t, &list); } bsi_insert_after (&si, list, BSI_SAME_STMT); } bsi_remove (&si, true); /* Iteration setup for sequential loop goes in L0_BB. */ list = alloc_stmt_list (); t = fold_convert (type, istart0); t = build2 (MODIFY_EXPR, void_type_node, fd->v, t); gimplify_and_add (t, &list); t = fold_convert (type, iend0); t = build2 (MODIFY_EXPR, void_type_node, iend, t); gimplify_and_add (t, &list); si = bsi_start (l0_bb); bsi_insert_after (&si, list, BSI_CONTINUE_LINKING); /* Handle the rare case where BODY doesn't ever return. */ if (cont_bb == NULL) { remove_edge (single_succ_edge (entry_bb)); make_edge (entry_bb, l0_bb, EDGE_FALLTHRU); make_edge (l0_bb, l1_bb, EDGE_FALLTHRU); return; } /* Code to control the increment and predicate for the sequential loop goes in the first half of EXIT_BB (we split EXIT_BB so that we can inherit all the edges going out of the loop body). 
*/ list = alloc_stmt_list (); t = build2 (PLUS_EXPR, type, fd->v, fd->step); t = build2 (MODIFY_EXPR, void_type_node, fd->v, t); gimplify_and_add (t, &list); t = build2 (fd->cond_code, boolean_type_node, fd->v, iend); t = get_formal_tmp_var (t, &list); t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l1), build_and_jump (&l2)); append_to_statement_list (t, &list); si = bsi_last (cont_bb); bsi_insert_after (&si, list, BSI_SAME_STMT); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE); bsi_remove (&si, true); /* Emit code to get the next parallel iteration in L2_BB. */ list = alloc_stmt_list (); t = build_fold_addr_expr (iend0); args = tree_cons (NULL, t, NULL); t = build_fold_addr_expr (istart0); args = tree_cons (NULL, t, args); t = build_function_call_expr (built_in_decls[next_fn], args); t = get_formal_tmp_var (t, &list); t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l0), build_and_jump (&l3)); append_to_statement_list (t, &list); si = bsi_start (l2_bb); bsi_insert_after (&si, list, BSI_CONTINUE_LINKING); /* Add the loop cleanup function. */ si = bsi_last (exit_bb); if (OMP_RETURN_NOWAIT (bsi_stmt (si))) t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT]; else t = built_in_decls[BUILT_IN_GOMP_LOOP_END]; t = build_function_call_expr (t, NULL); bsi_insert_after (&si, t, BSI_SAME_STMT); bsi_remove (&si, true); /* Connect the new blocks. */ remove_edge (single_succ_edge (entry_bb)); if (in_combined_parallel) make_edge (entry_bb, l2_bb, EDGE_FALLTHRU); else { make_edge (entry_bb, l0_bb, EDGE_TRUE_VALUE); make_edge (entry_bb, l3_bb, EDGE_FALSE_VALUE); } make_edge (l0_bb, l1_bb, EDGE_FALLTHRU); remove_edge (single_succ_edge (cont_bb)); make_edge (cont_bb, l1_bb, EDGE_TRUE_VALUE); make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE); make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE); make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE); } /* A subroutine of expand_omp_for. Generate code for a parallel loop with static schedule and no specified chunk size. Given parameters: for (V = N1; V cond N2; V += STEP) BODY; where COND is "<" or ">", we generate pseudocode if (cond is <) adj = STEP - 1; else adj = STEP + 1; n = (adj + N2 - N1) / STEP; q = n / nthreads; q += (q * nthreads != n); s0 = q * threadid; e0 = min(s0 + q, n); if (s0 >= e0) goto L2; else goto L0; L0: V = s0 * STEP + N1; e = e0 * STEP + N1; L1: BODY; V += STEP; if (V cond e) goto L1; L2: */ static void expand_omp_for_static_nochunk (struct omp_region *region, struct omp_for_data *fd) { tree l0, l1, l2, n, q, s0, e0, e, t, nthreads, threadid; tree type, list; basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb; basic_block fin_bb; block_stmt_iterator si; type = TREE_TYPE (fd->v); entry_bb = region->entry; seq_start_bb = create_empty_bb (entry_bb); body_bb = single_succ (entry_bb); cont_bb = region->cont; fin_bb = single_succ (cont_bb); exit_bb = region->exit; l0 = tree_block_label (seq_start_bb); l1 = tree_block_label (body_bb); l2 = tree_block_label (fin_bb); /* Iteration space partitioning goes in ENTRY_BB. 
*/ list = alloc_stmt_list (); t = built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS]; t = build_function_call_expr (t, NULL); t = fold_convert (type, t); nthreads = get_formal_tmp_var (t, &list); t = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM]; t = build_function_call_expr (t, NULL); t = fold_convert (type, t); threadid = get_formal_tmp_var (t, &list); fd->n1 = fold_convert (type, fd->n1); if (!is_gimple_val (fd->n1)) fd->n1 = get_formal_tmp_var (fd->n1, &list); fd->n2 = fold_convert (type, fd->n2); if (!is_gimple_val (fd->n2)) fd->n2 = get_formal_tmp_var (fd->n2, &list); fd->step = fold_convert (type, fd->step); if (!is_gimple_val (fd->step)) fd->step = get_formal_tmp_var (fd->step, &list); t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1)); t = fold_build2 (PLUS_EXPR, type, fd->step, t); t = fold_build2 (PLUS_EXPR, type, t, fd->n2); t = fold_build2 (MINUS_EXPR, type, t, fd->n1); t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step); t = fold_convert (type, t); if (is_gimple_val (t)) n = t; else n = get_formal_tmp_var (t, &list); t = build2 (TRUNC_DIV_EXPR, type, n, nthreads); q = get_formal_tmp_var (t, &list); t = build2 (MULT_EXPR, type, q, nthreads); t = build2 (NE_EXPR, type, t, n); t = build2 (PLUS_EXPR, type, q, t); q = get_formal_tmp_var (t, &list); t = build2 (MULT_EXPR, type, q, threadid); s0 = get_formal_tmp_var (t, &list); t = build2 (PLUS_EXPR, type, s0, q); t = build2 (MIN_EXPR, type, t, n); e0 = get_formal_tmp_var (t, &list); t = build2 (GE_EXPR, boolean_type_node, s0, e0); t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l2), build_and_jump (&l0)); append_to_statement_list (t, &list); si = bsi_last (entry_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR); bsi_insert_after (&si, list, BSI_SAME_STMT); bsi_remove (&si, true); /* Setup code for sequential iteration goes in SEQ_START_BB. */ list = alloc_stmt_list (); t = fold_convert (type, s0); t = build2 (MULT_EXPR, type, t, fd->step); t = build2 (PLUS_EXPR, type, t, fd->n1); t = build2 (MODIFY_EXPR, void_type_node, fd->v, t); /* LLVM LOCAL begin 7387470 */ /* Decorate this MODIFY_EXPR with the correct lexical block. */ { extern tree debug_find_var_in_block_tree (tree, tree); TREE_BLOCK(t) = debug_find_var_in_block_tree(fd->v, DECL_INITIAL(current_function_decl)); } /* LLVM LOCAL end 7387470 */ gimplify_and_add (t, &list); t = fold_convert (type, e0); t = build2 (MULT_EXPR, type, t, fd->step); t = build2 (PLUS_EXPR, type, t, fd->n1); e = get_formal_tmp_var (t, &list); si = bsi_start (seq_start_bb); bsi_insert_after (&si, list, BSI_CONTINUE_LINKING); /* The code controlling the sequential loop replaces the OMP_CONTINUE. */ list = alloc_stmt_list (); t = build2 (PLUS_EXPR, type, fd->v, fd->step); t = build2 (MODIFY_EXPR, void_type_node, fd->v, t); gimplify_and_add (t, &list); t = build2 (fd->cond_code, boolean_type_node, fd->v, e); t = get_formal_tmp_var (t, &list); t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l1), build_and_jump (&l2)); append_to_statement_list (t, &list); si = bsi_last (cont_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE); bsi_insert_after (&si, list, BSI_SAME_STMT); bsi_remove (&si, true); /* Replace the OMP_RETURN with a barrier, or nothing. */ si = bsi_last (exit_bb); if (!OMP_RETURN_NOWAIT (bsi_stmt (si))) { list = alloc_stmt_list (); build_omp_barrier (&list); bsi_insert_after (&si, list, BSI_SAME_STMT); } bsi_remove (&si, true); /* Connect all the blocks. 
*/ make_edge (seq_start_bb, body_bb, EDGE_FALLTHRU); remove_edge (single_succ_edge (entry_bb)); make_edge (entry_bb, fin_bb, EDGE_TRUE_VALUE); make_edge (entry_bb, seq_start_bb, EDGE_FALSE_VALUE); make_edge (cont_bb, body_bb, EDGE_TRUE_VALUE); find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE; } /* A subroutine of expand_omp_for. Generate code for a parallel loop with static schedule and a specified chunk size. Given parameters: for (V = N1; V cond N2; V += STEP) BODY; where COND is "<" or ">", we generate pseudocode if (cond is <) adj = STEP - 1; else adj = STEP + 1; n = (adj + N2 - N1) / STEP; trip = 0; L0: s0 = (trip * nthreads + threadid) * CHUNK; e0 = min(s0 + CHUNK, n); if (s0 < n) goto L1; else goto L4; L1: V = s0 * STEP + N1; e = e0 * STEP + N1; L2: BODY; V += STEP; if (V cond e) goto L2; else goto L3; L3: trip += 1; goto L0; L4: */ static void expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd) { tree l0, l1, l2, l3, l4, n, s0, e0, e, t; tree trip, nthreads, threadid; tree type; basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb; basic_block trip_update_bb, cont_bb, fin_bb; tree list; block_stmt_iterator si; type = TREE_TYPE (fd->v); entry_bb = region->entry; iter_part_bb = create_empty_bb (entry_bb); seq_start_bb = create_empty_bb (iter_part_bb); body_bb = single_succ (entry_bb); cont_bb = region->cont; trip_update_bb = create_empty_bb (cont_bb); fin_bb = single_succ (cont_bb); exit_bb = region->exit; l0 = tree_block_label (iter_part_bb); l1 = tree_block_label (seq_start_bb); l2 = tree_block_label (body_bb); l3 = tree_block_label (trip_update_bb); l4 = tree_block_label (fin_bb); /* Trip and adjustment setup goes in ENTRY_BB. */ list = alloc_stmt_list (); t = built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS]; t = build_function_call_expr (t, NULL); t = fold_convert (type, t); nthreads = get_formal_tmp_var (t, &list); t = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM]; t = build_function_call_expr (t, NULL); t = fold_convert (type, t); threadid = get_formal_tmp_var (t, &list); fd->n1 = fold_convert (type, fd->n1); if (!is_gimple_val (fd->n1)) fd->n1 = get_formal_tmp_var (fd->n1, &list); fd->n2 = fold_convert (type, fd->n2); if (!is_gimple_val (fd->n2)) fd->n2 = get_formal_tmp_var (fd->n2, &list); fd->step = fold_convert (type, fd->step); if (!is_gimple_val (fd->step)) fd->step = get_formal_tmp_var (fd->step, &list); fd->chunk_size = fold_convert (type, fd->chunk_size); if (!is_gimple_val (fd->chunk_size)) fd->chunk_size = get_formal_tmp_var (fd->chunk_size, &list); t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1)); t = fold_build2 (PLUS_EXPR, type, fd->step, t); t = fold_build2 (PLUS_EXPR, type, t, fd->n2); t = fold_build2 (MINUS_EXPR, type, t, fd->n1); t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step); t = fold_convert (type, t); if (is_gimple_val (t)) n = t; else n = get_formal_tmp_var (t, &list); t = build_int_cst (type, 0); trip = get_initialized_tmp_var (t, &list, NULL); si = bsi_last (entry_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR); bsi_insert_after (&si, list, BSI_SAME_STMT); bsi_remove (&si, true); /* Iteration space partitioning goes in ITER_PART_BB. 
*/ list = alloc_stmt_list (); t = build2 (MULT_EXPR, type, trip, nthreads); t = build2 (PLUS_EXPR, type, t, threadid); t = build2 (MULT_EXPR, type, t, fd->chunk_size); s0 = get_formal_tmp_var (t, &list); t = build2 (PLUS_EXPR, type, s0, fd->chunk_size); t = build2 (MIN_EXPR, type, t, n); e0 = get_formal_tmp_var (t, &list); t = build2 (LT_EXPR, boolean_type_node, s0, n); t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l1), build_and_jump (&l4)); append_to_statement_list (t, &list); si = bsi_start (iter_part_bb); bsi_insert_after (&si, list, BSI_CONTINUE_LINKING); /* Setup code for sequential iteration goes in SEQ_START_BB. */ list = alloc_stmt_list (); t = fold_convert (type, s0); t = build2 (MULT_EXPR, type, t, fd->step); t = build2 (PLUS_EXPR, type, t, fd->n1); t = build2 (MODIFY_EXPR, void_type_node, fd->v, t); gimplify_and_add (t, &list); t = fold_convert (type, e0); t = build2 (MULT_EXPR, type, t, fd->step); t = build2 (PLUS_EXPR, type, t, fd->n1); e = get_formal_tmp_var (t, &list); si = bsi_start (seq_start_bb); bsi_insert_after (&si, list, BSI_CONTINUE_LINKING); /* The code controlling the sequential loop goes in CONT_BB, replacing the OMP_CONTINUE. */ list = alloc_stmt_list (); t = build2 (PLUS_EXPR, type, fd->v, fd->step); t = build2 (MODIFY_EXPR, void_type_node, fd->v, t); gimplify_and_add (t, &list); t = build2 (fd->cond_code, boolean_type_node, fd->v, e); t = get_formal_tmp_var (t, &list); t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l2), build_and_jump (&l3)); append_to_statement_list (t, &list); si = bsi_last (cont_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE); bsi_insert_after (&si, list, BSI_SAME_STMT); bsi_remove (&si, true); /* Trip update code goes into TRIP_UPDATE_BB. */ list = alloc_stmt_list (); t = build_int_cst (type, 1); t = build2 (PLUS_EXPR, type, trip, t); t = build2 (MODIFY_EXPR, void_type_node, trip, t); gimplify_and_add (t, &list); si = bsi_start (trip_update_bb); bsi_insert_after (&si, list, BSI_CONTINUE_LINKING); /* Replace the OMP_RETURN with a barrier, or nothing. */ si = bsi_last (exit_bb); if (!OMP_RETURN_NOWAIT (bsi_stmt (si))) { list = alloc_stmt_list (); build_omp_barrier (&list); bsi_insert_after (&si, list, BSI_SAME_STMT); } bsi_remove (&si, true); /* Connect the new blocks. */ remove_edge (single_succ_edge (entry_bb)); make_edge (entry_bb, iter_part_bb, EDGE_FALLTHRU); make_edge (iter_part_bb, seq_start_bb, EDGE_TRUE_VALUE); make_edge (iter_part_bb, fin_bb, EDGE_FALSE_VALUE); make_edge (seq_start_bb, body_bb, EDGE_FALLTHRU); remove_edge (single_succ_edge (cont_bb)); make_edge (cont_bb, body_bb, EDGE_TRUE_VALUE); make_edge (cont_bb, trip_update_bb, EDGE_FALSE_VALUE); make_edge (trip_update_bb, iter_part_bb, EDGE_FALLTHRU); } /* Expand the OpenMP loop defined by REGION. */ static void expand_omp_for (struct omp_region *region) { struct omp_for_data fd; push_gimplify_context (); extract_omp_for_data (last_stmt (region->entry), &fd); region->sched_kind = fd.sched_kind; if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC && !fd.have_ordered && region->cont && region->exit) { if (fd.chunk_size == NULL) expand_omp_for_static_nochunk (region, &fd); else expand_omp_for_static_chunk (region, &fd); } else { int fn_index = fd.sched_kind + fd.have_ordered * 4; int start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index; int next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index; expand_omp_for_generic (region, &fd, start_ix, next_ix); } pop_gimplify_context (NULL); } /* Expand code for an OpenMP sections directive. 
In pseudo code, we generate v = GOMP_sections_start (n); L0: switch (v) { case 0: goto L2; case 1: section 1; goto L1; case 2: ... case n: ... default: abort (); } L1: v = GOMP_sections_next (); goto L0; L2: reduction; If this is a combined parallel sections, replace the call to GOMP_sections_start with 'goto L1'. */ static void expand_omp_sections (struct omp_region *region) { tree label_vec, l0, l1, l2, t, u, v, sections_stmt; unsigned i, len; basic_block entry_bb, exit_bb, l0_bb, l1_bb, l2_bb, default_bb; block_stmt_iterator si; struct omp_region *inner; edge e; entry_bb = region->entry; l0_bb = create_empty_bb (entry_bb); l0 = tree_block_label (l0_bb); gcc_assert ((region->cont != NULL) ^ (region->exit == NULL)); l1_bb = region->cont; if (l1_bb) { l2_bb = single_succ (l1_bb); default_bb = create_empty_bb (l1_bb->prev_bb); l1 = tree_block_label (l1_bb); } else { l2_bb = create_empty_bb (l0_bb); default_bb = l2_bb; l1 = NULL; } l2 = tree_block_label (l2_bb); exit_bb = region->exit; v = create_tmp_var (unsigned_type_node, ".section"); /* We will build a switch() with enough cases for all the OMP_SECTION regions, a '0' case to handle the end of more work and a default case to abort if something goes wrong. */ len = EDGE_COUNT (entry_bb->succs); label_vec = make_tree_vec (len + 2); /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the OMP_SECTIONS statement. */ si = bsi_last (entry_bb); sections_stmt = bsi_stmt (si); gcc_assert (TREE_CODE (sections_stmt) == OMP_SECTIONS); if (!is_combined_parallel (region)) { /* If we are not inside a combined parallel+sections region, call GOMP_sections_start. */ t = build_int_cst (unsigned_type_node, len); t = tree_cons (NULL, t, NULL); u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START]; t = build_function_call_expr (u, t); t = build2 (MODIFY_EXPR, void_type_node, v, t); bsi_insert_after (&si, t, BSI_SAME_STMT); } bsi_remove (&si, true); /* The switch() statement replacing OMP_SECTIONS goes in L0_BB. */ si = bsi_start (l0_bb); t = build3 (SWITCH_EXPR, void_type_node, v, NULL, label_vec); bsi_insert_after (&si, t, BSI_CONTINUE_LINKING); t = build3 (CASE_LABEL_EXPR, void_type_node, build_int_cst (unsigned_type_node, 0), NULL, l2); TREE_VEC_ELT (label_vec, 0) = t; make_edge (l0_bb, l2_bb, 0); /* Convert each OMP_SECTION into a CASE_LABEL_EXPR. */ for (inner = region->inner, i = 1; inner; inner = inner->next, ++i) { basic_block s_entry_bb, s_exit_bb; s_entry_bb = inner->entry; s_exit_bb = inner->exit; t = tree_block_label (s_entry_bb); u = build_int_cst (unsigned_type_node, i); u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t); TREE_VEC_ELT (label_vec, i) = u; si = bsi_last (s_entry_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SECTION); gcc_assert (i < len || OMP_SECTION_LAST (bsi_stmt (si))); bsi_remove (&si, true); e = single_pred_edge (s_entry_bb); e->flags = 0; redirect_edge_pred (e, l0_bb); single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU; if (s_exit_bb == NULL) continue; si = bsi_last (s_exit_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN); bsi_remove (&si, true); single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU; } /* Error handling code goes in DEFAULT_BB. 
*/ t = tree_block_label (default_bb); u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t); TREE_VEC_ELT (label_vec, len + 1) = u; make_edge (l0_bb, default_bb, 0); si = bsi_start (default_bb); t = built_in_decls[BUILT_IN_TRAP]; t = build_function_call_expr (t, NULL); bsi_insert_after (&si, t, BSI_CONTINUE_LINKING); /* Code to get the next section goes in L1_BB. */ if (l1_bb) { si = bsi_last (l1_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE); t = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT]; t = build_function_call_expr (t, NULL); t = build2 (MODIFY_EXPR, void_type_node, v, t); bsi_insert_after (&si, t, BSI_SAME_STMT); bsi_remove (&si, true); } /* Cleanup function replaces OMP_RETURN in EXIT_BB. */ if (exit_bb) { si = bsi_last (exit_bb); if (OMP_RETURN_NOWAIT (bsi_stmt (si))) t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT]; else t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END]; t = build_function_call_expr (t, NULL); bsi_insert_after (&si, t, BSI_SAME_STMT); bsi_remove (&si, true); } /* Connect the new blocks. */ if (is_combined_parallel (region)) { /* If this was a combined parallel+sections region, we did not emit a GOMP_sections_start in the entry block, so we just need to jump to L1_BB to get the next section. */ make_edge (entry_bb, l1_bb, EDGE_FALLTHRU); } else make_edge (entry_bb, l0_bb, EDGE_FALLTHRU); if (l1_bb) { e = single_succ_edge (l1_bb); redirect_edge_succ (e, l0_bb); e->flags = EDGE_FALLTHRU; } } /* Expand code for an OpenMP single directive. We've already expanded much of the code, here we simply place the GOMP_barrier call. */ static void expand_omp_single (struct omp_region *region) { basic_block entry_bb, exit_bb; block_stmt_iterator si; bool need_barrier = false; entry_bb = region->entry; exit_bb = region->exit; si = bsi_last (entry_bb); /* The terminal barrier at the end of a GOMP_single_copy sequence cannot be removed. We need to ensure that the thread that entered the single does not exit before the data is copied out by the other threads. */ if (find_omp_clause (OMP_SINGLE_CLAUSES (bsi_stmt (si)), OMP_CLAUSE_COPYPRIVATE)) need_barrier = true; gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SINGLE); bsi_remove (&si, true); single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU; si = bsi_last (exit_bb); if (!OMP_RETURN_NOWAIT (bsi_stmt (si)) || need_barrier) { tree t = alloc_stmt_list (); build_omp_barrier (&t); bsi_insert_after (&si, t, BSI_SAME_STMT); } bsi_remove (&si, true); single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU; } /* Generic expansion for OpenMP synchronization directives: master, ordered and critical. All we need to do here is remove the entry and exit markers for REGION. */ static void expand_omp_synch (struct omp_region *region) { basic_block entry_bb, exit_bb; block_stmt_iterator si; entry_bb = region->entry; exit_bb = region->exit; si = bsi_last (entry_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SINGLE || TREE_CODE (bsi_stmt (si)) == OMP_MASTER || TREE_CODE (bsi_stmt (si)) == OMP_ORDERED || TREE_CODE (bsi_stmt (si)) == OMP_CRITICAL); bsi_remove (&si, true); single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU; if (exit_bb) { si = bsi_last (exit_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN); bsi_remove (&si, true); single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU; } } /* Expand the parallel region tree rooted at REGION. Expansion proceeds in depth-first order. Innermost regions are expanded first. 
This way, parallel regions that require a new function to be created (e.g., OMP_PARALLEL) can be expanded without having any internal dependencies in their body. */ static void expand_omp (struct omp_region *region) { while (region) { if (region->inner) expand_omp (region->inner); switch (region->type) { case OMP_PARALLEL: expand_omp_parallel (region); break; case OMP_FOR: expand_omp_for (region); break; case OMP_SECTIONS: expand_omp_sections (region); break; case OMP_SECTION: /* Individual omp sections are handled together with their parent OMP_SECTIONS region. */ break; case OMP_SINGLE: expand_omp_single (region); break; case OMP_MASTER: case OMP_ORDERED: case OMP_CRITICAL: expand_omp_synch (region); break; default: gcc_unreachable (); } region = region->next; } } /* Helper for build_omp_regions. Scan the dominator tree starting at block BB. PARENT is the region that contains BB. */ static void build_omp_regions_1 (basic_block bb, struct omp_region *parent) { block_stmt_iterator si; tree stmt; basic_block son; si = bsi_last (bb); if (!bsi_end_p (si) && OMP_DIRECTIVE_P (bsi_stmt (si))) { struct omp_region *region; enum tree_code code; stmt = bsi_stmt (si); code = TREE_CODE (stmt); if (code == OMP_RETURN) { /* STMT is the return point out of region PARENT. Mark it as the exit point and make PARENT the immediately enclosing region. */ gcc_assert (parent); region = parent; region->exit = bb; parent = parent->outer; /* If REGION is a parallel region, determine whether it is a combined parallel+workshare region. */ if (region->type == OMP_PARALLEL) determine_parallel_type (region); } else if (code == OMP_CONTINUE) { gcc_assert (parent); parent->cont = bb; } else { /* Otherwise, this directive becomes the parent for a new region. */ region = new_omp_region (bb, code, parent); parent = region; } } for (son = first_dom_son (CDI_DOMINATORS, bb); son; son = next_dom_son (CDI_DOMINATORS, son)) build_omp_regions_1 (son, parent); } /* Scan the CFG and build a tree of OMP regions. Return the root of the OMP region tree. */ static void build_omp_regions (void) { gcc_assert (root_omp_region == NULL); calculate_dominance_info (CDI_DOMINATORS); build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL); } /* Main entry point for expanding OMP-GIMPLE into runtime calls. */ static unsigned int execute_expand_omp (void) { build_omp_regions (); if (!root_omp_region) return 0; if (dump_file) { fprintf (dump_file, "\nOMP region tree\n\n"); dump_omp_region (dump_file, root_omp_region, 0); fprintf (dump_file, "\n"); } remove_exit_barriers (root_omp_region); expand_omp (root_omp_region); free_dominance_info (CDI_DOMINATORS); free_dominance_info (CDI_POST_DOMINATORS); cleanup_tree_cfg (); free_omp_regions (); return 0; } static bool gate_expand_omp (void) { return flag_openmp != 0 && errorcount == 0; } struct tree_opt_pass pass_expand_omp = { "ompexp", /* name */ gate_expand_omp, /* gate */ execute_expand_omp, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ PROP_gimple_any, /* properties_required */ PROP_gimple_lomp, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func, /* todo_flags_finish */ 0 /* letter */ }; /* Routines to lower OpenMP directives into OMP-GIMPLE. */ /* Lower the OpenMP sections directive in *STMT_P. 
*/ static void lower_omp_sections (tree *stmt_p, omp_context *ctx) { tree new_stmt, stmt, body, bind, block, ilist, olist, new_body; tree t, dlist; tree_stmt_iterator tsi; unsigned i, len; stmt = *stmt_p; push_gimplify_context (); dlist = NULL; ilist = NULL; lower_rec_input_clauses (OMP_SECTIONS_CLAUSES (stmt), &ilist, &dlist, ctx); tsi = tsi_start (OMP_SECTIONS_BODY (stmt)); for (len = 0; !tsi_end_p (tsi); len++, tsi_next (&tsi)) continue; tsi = tsi_start (OMP_SECTIONS_BODY (stmt)); body = alloc_stmt_list (); for (i = 0; i < len; i++, tsi_next (&tsi)) { omp_context *sctx; tree sec_start, sec_end; sec_start = tsi_stmt (tsi); sctx = maybe_lookup_ctx (sec_start); gcc_assert (sctx); append_to_statement_list (sec_start, &body); lower_omp (&OMP_SECTION_BODY (sec_start), sctx); append_to_statement_list (OMP_SECTION_BODY (sec_start), &body); OMP_SECTION_BODY (sec_start) = NULL; if (i == len - 1) { tree l = alloc_stmt_list (); lower_lastprivate_clauses (OMP_SECTIONS_CLAUSES (stmt), NULL, &l, ctx); append_to_statement_list (l, &body); OMP_SECTION_LAST (sec_start) = 1; } sec_end = make_node (OMP_RETURN); append_to_statement_list (sec_end, &body); } block = make_node (BLOCK); bind = build3 (BIND_EXPR, void_type_node, NULL, body, block); olist = NULL_TREE; lower_reduction_clauses (OMP_SECTIONS_CLAUSES (stmt), &olist, ctx); pop_gimplify_context (NULL_TREE); record_vars_into (ctx->block_vars, ctx->cb.dst_fn); new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL); TREE_SIDE_EFFECTS (new_stmt) = 1; new_body = alloc_stmt_list (); append_to_statement_list (ilist, &new_body); append_to_statement_list (stmt, &new_body); append_to_statement_list (bind, &new_body); t = make_node (OMP_CONTINUE); append_to_statement_list (t, &new_body); append_to_statement_list (olist, &new_body); append_to_statement_list (dlist, &new_body); maybe_catch_exception (&new_body); t = make_node (OMP_RETURN); OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SECTIONS_CLAUSES (stmt), OMP_CLAUSE_NOWAIT); append_to_statement_list (t, &new_body); BIND_EXPR_BODY (new_stmt) = new_body; OMP_SECTIONS_BODY (stmt) = NULL; *stmt_p = new_stmt; } /* A subroutine of lower_omp_single. Expand the simple form of an OMP_SINGLE, without a copyprivate clause: if (GOMP_single_start ()) BODY; [ GOMP_barrier (); ] -> unless 'nowait' is present. FIXME. It may be better to delay expanding the logic of this until pass_expand_omp. The expanded logic may make the job more difficult to a synchronization analysis pass. */ static void lower_omp_single_simple (tree single_stmt, tree *pre_p) { tree t; t = built_in_decls[BUILT_IN_GOMP_SINGLE_START]; t = build_function_call_expr (t, NULL); t = build3 (COND_EXPR, void_type_node, t, OMP_SINGLE_BODY (single_stmt), NULL); gimplify_and_add (t, pre_p); } /* A subroutine of lower_omp_single. Expand the simple form of an OMP_SINGLE, with a copyprivate clause: #pragma omp single copyprivate (a, b, c) Create a new structure to hold copies of 'a', 'b' and 'c' and emit: { if ((copyout_p = GOMP_single_copy_start ()) == NULL) { BODY; copyout.a = a; copyout.b = b; copyout.c = c; GOMP_single_copy_end (&copyout); } else { a = copyout_p->a; b = copyout_p->b; c = copyout_p->c; } GOMP_barrier (); } FIXME. It may be better to delay expanding the logic of this until pass_expand_omp. The expanded logic may make the job more difficult to a synchronization analysis pass. 
*/ static void lower_omp_single_copy (tree single_stmt, tree *pre_p, omp_context *ctx) { tree ptr_type, t, args, l0, l1, l2, copyin_seq; ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o"); ptr_type = build_pointer_type (ctx->record_type); ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i"); l0 = create_artificial_label (); l1 = create_artificial_label (); l2 = create_artificial_label (); t = built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START]; t = build_function_call_expr (t, NULL); t = fold_convert (ptr_type, t); t = build2 (MODIFY_EXPR, void_type_node, ctx->receiver_decl, t); gimplify_and_add (t, pre_p); t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl, build_int_cst (ptr_type, 0)); t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l0), build_and_jump (&l1)); gimplify_and_add (t, pre_p); t = build1 (LABEL_EXPR, void_type_node, l0); gimplify_and_add (t, pre_p); append_to_statement_list (OMP_SINGLE_BODY (single_stmt), pre_p); copyin_seq = NULL; lower_copyprivate_clauses (OMP_SINGLE_CLAUSES (single_stmt), pre_p, &copyin_seq, ctx); t = build_fold_addr_expr (ctx->sender_decl); args = tree_cons (NULL, t, NULL); t = built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END]; t = build_function_call_expr (t, args); gimplify_and_add (t, pre_p); t = build_and_jump (&l2); gimplify_and_add (t, pre_p); t = build1 (LABEL_EXPR, void_type_node, l1); gimplify_and_add (t, pre_p); append_to_statement_list (copyin_seq, pre_p); t = build1 (LABEL_EXPR, void_type_node, l2); gimplify_and_add (t, pre_p); } /* Expand code for an OpenMP single directive. */ static void lower_omp_single (tree *stmt_p, omp_context *ctx) { tree t, bind, block, single_stmt = *stmt_p, dlist; push_gimplify_context (); block = make_node (BLOCK); *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block); TREE_SIDE_EFFECTS (bind) = 1; lower_rec_input_clauses (OMP_SINGLE_CLAUSES (single_stmt), &BIND_EXPR_BODY (bind), &dlist, ctx); lower_omp (&OMP_SINGLE_BODY (single_stmt), ctx); append_to_statement_list (single_stmt, &BIND_EXPR_BODY (bind)); if (ctx->record_type) lower_omp_single_copy (single_stmt, &BIND_EXPR_BODY (bind), ctx); else lower_omp_single_simple (single_stmt, &BIND_EXPR_BODY (bind)); OMP_SINGLE_BODY (single_stmt) = NULL; append_to_statement_list (dlist, &BIND_EXPR_BODY (bind)); maybe_catch_exception (&BIND_EXPR_BODY (bind)); t = make_node (OMP_RETURN); OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SINGLE_CLAUSES (single_stmt), OMP_CLAUSE_NOWAIT); append_to_statement_list (t, &BIND_EXPR_BODY (bind)); pop_gimplify_context (bind); BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars); BLOCK_VARS (block) = BIND_EXPR_VARS (bind); } /* Expand code for an OpenMP master directive. 
*/ static void lower_omp_master (tree *stmt_p, omp_context *ctx) { tree bind, block, stmt = *stmt_p, lab = NULL, x; push_gimplify_context (); block = make_node (BLOCK); *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block); TREE_SIDE_EFFECTS (bind) = 1; append_to_statement_list (stmt, &BIND_EXPR_BODY (bind)); x = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM]; x = build_function_call_expr (x, NULL); x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node); x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab)); gimplify_and_add (x, &BIND_EXPR_BODY (bind)); lower_omp (&OMP_MASTER_BODY (stmt), ctx); maybe_catch_exception (&OMP_MASTER_BODY (stmt)); append_to_statement_list (OMP_MASTER_BODY (stmt), &BIND_EXPR_BODY (bind)); OMP_MASTER_BODY (stmt) = NULL; x = build1 (LABEL_EXPR, void_type_node, lab); gimplify_and_add (x, &BIND_EXPR_BODY (bind)); x = make_node (OMP_RETURN); OMP_RETURN_NOWAIT (x) = 1; append_to_statement_list (x, &BIND_EXPR_BODY (bind)); pop_gimplify_context (bind); BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars); BLOCK_VARS (block) = BIND_EXPR_VARS (bind); } /* Expand code for an OpenMP ordered directive. */ static void lower_omp_ordered (tree *stmt_p, omp_context *ctx) { tree bind, block, stmt = *stmt_p, x; push_gimplify_context (); block = make_node (BLOCK); *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block); TREE_SIDE_EFFECTS (bind) = 1; append_to_statement_list (stmt, &BIND_EXPR_BODY (bind)); x = built_in_decls[BUILT_IN_GOMP_ORDERED_START]; x = build_function_call_expr (x, NULL); gimplify_and_add (x, &BIND_EXPR_BODY (bind)); lower_omp (&OMP_ORDERED_BODY (stmt), ctx); maybe_catch_exception (&OMP_ORDERED_BODY (stmt)); append_to_statement_list (OMP_ORDERED_BODY (stmt), &BIND_EXPR_BODY (bind)); OMP_ORDERED_BODY (stmt) = NULL; x = built_in_decls[BUILT_IN_GOMP_ORDERED_END]; x = build_function_call_expr (x, NULL); gimplify_and_add (x, &BIND_EXPR_BODY (bind)); x = make_node (OMP_RETURN); OMP_RETURN_NOWAIT (x) = 1; append_to_statement_list (x, &BIND_EXPR_BODY (bind)); pop_gimplify_context (bind); BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars); BLOCK_VARS (block) = BIND_EXPR_VARS (bind); } /* Gimplify an OMP_CRITICAL statement. This is a relatively simple substitution of a couple of function calls. But in the NAMED case, requires that languages coordinate a symbol name. It is therefore best put here in common code. 
*/ static GTY((param1_is (tree), param2_is (tree))) splay_tree critical_name_mutexes; static void lower_omp_critical (tree *stmt_p, omp_context *ctx) { tree bind, block, stmt = *stmt_p; tree t, lock, unlock, name; name = OMP_CRITICAL_NAME (stmt); if (name) { tree decl, args; splay_tree_node n; if (!critical_name_mutexes) critical_name_mutexes = splay_tree_new_ggc (splay_tree_compare_pointers); n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name); if (n == NULL) { char *new_str; decl = create_tmp_var_raw (ptr_type_node, NULL); new_str = ACONCAT ((".gomp_critical_user_", IDENTIFIER_POINTER (name), NULL)); DECL_NAME (decl) = get_identifier (new_str); TREE_PUBLIC (decl) = 1; TREE_STATIC (decl) = 1; DECL_COMMON (decl) = 1; DECL_ARTIFICIAL (decl) = 1; DECL_IGNORED_P (decl) = 1; cgraph_varpool_finalize_decl (decl); splay_tree_insert (critical_name_mutexes, (splay_tree_key) name, (splay_tree_value) decl); } else decl = (tree) n->value; args = tree_cons (NULL, build_fold_addr_expr (decl), NULL); lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START]; lock = build_function_call_expr (lock, args); args = tree_cons (NULL, build_fold_addr_expr (decl), NULL); unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END]; unlock = build_function_call_expr (unlock, args); } else { lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START]; lock = build_function_call_expr (lock, NULL); unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END]; unlock = build_function_call_expr (unlock, NULL); } push_gimplify_context (); block = make_node (BLOCK); *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block); TREE_SIDE_EFFECTS (bind) = 1; append_to_statement_list (stmt, &BIND_EXPR_BODY (bind)); gimplify_and_add (lock, &BIND_EXPR_BODY (bind)); lower_omp (&OMP_CRITICAL_BODY (stmt), ctx); maybe_catch_exception (&OMP_CRITICAL_BODY (stmt)); append_to_statement_list (OMP_CRITICAL_BODY (stmt), &BIND_EXPR_BODY (bind)); OMP_CRITICAL_BODY (stmt) = NULL; gimplify_and_add (unlock, &BIND_EXPR_BODY (bind)); t = make_node (OMP_RETURN); OMP_RETURN_NOWAIT (t) = 1; append_to_statement_list (t, &BIND_EXPR_BODY (bind)); pop_gimplify_context (bind); BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars); BLOCK_VARS (block) = BIND_EXPR_VARS (bind); } /* A subroutine of lower_omp_for. Generate code to emit the predicate for a lastprivate clause. Given a loop control predicate of (V cond N2), we gate the clause on (!(V cond N2)). The lowered form is appended to *DLIST, iterator initialization is appended to *BODY_P. */ static void lower_omp_for_lastprivate (struct omp_for_data *fd, tree *body_p, tree *dlist, struct omp_context *ctx) { tree clauses, cond, stmts, vinit, t; enum tree_code cond_code; cond_code = fd->cond_code; cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR; /* When possible, use a strict equality expression. This can let VRP type optimizations deduce the value and remove a copy. */ if (host_integerp (fd->step, 0)) { HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->step); if (step == 1 || step == -1) cond_code = EQ_EXPR; } cond = build2 (cond_code, boolean_type_node, fd->v, fd->n2); clauses = OMP_FOR_CLAUSES (fd->for_stmt); stmts = NULL; lower_lastprivate_clauses (clauses, cond, &stmts, ctx); if (stmts != NULL) { append_to_statement_list (stmts, dlist); /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */ vinit = fd->n1; if (cond_code == EQ_EXPR && host_integerp (fd->n2, 0) && ! 
integer_zerop (fd->n2)) vinit = build_int_cst (TREE_TYPE (fd->v), 0); /* Initialize the iterator variable, so that threads that don't execute any iterations don't execute the lastprivate clauses by accident. */ t = build2 (MODIFY_EXPR, void_type_node, fd->v, vinit); gimplify_and_add (t, body_p); } } /* Lower code for an OpenMP loop directive. */ static void lower_omp_for (tree *stmt_p, omp_context *ctx) { tree t, stmt, ilist, dlist, new_stmt, *body_p, *rhs_p; struct omp_for_data fd; stmt = *stmt_p; push_gimplify_context (); lower_omp (&OMP_FOR_PRE_BODY (stmt), ctx); lower_omp (&OMP_FOR_BODY (stmt), ctx); /* Move declaration of temporaries in the loop body before we make it go away. */ if (TREE_CODE (OMP_FOR_BODY (stmt)) == BIND_EXPR) record_vars_into (BIND_EXPR_VARS (OMP_FOR_BODY (stmt)), ctx->cb.dst_fn); new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL); TREE_SIDE_EFFECTS (new_stmt) = 1; body_p = &BIND_EXPR_BODY (new_stmt); /* The pre-body and input clauses go before the lowered OMP_FOR. */ ilist = NULL; dlist = NULL; append_to_statement_list (OMP_FOR_PRE_BODY (stmt), body_p); lower_rec_input_clauses (OMP_FOR_CLAUSES (stmt), body_p, &dlist, ctx); /* Lower the header expressions. At this point, we can assume that the header is of the form: #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3) We just need to make sure that VAL1, VAL2 and VAL3 are lowered using the .omp_data_s mapping, if needed. */ rhs_p = &TREE_OPERAND (OMP_FOR_INIT (stmt), 1); if (!is_gimple_min_invariant (*rhs_p)) *rhs_p = get_formal_tmp_var (*rhs_p, body_p); rhs_p = &TREE_OPERAND (OMP_FOR_COND (stmt), 1); if (!is_gimple_min_invariant (*rhs_p)) *rhs_p = get_formal_tmp_var (*rhs_p, body_p); rhs_p = &TREE_OPERAND (TREE_OPERAND (OMP_FOR_INCR (stmt), 1), 1); if (!is_gimple_min_invariant (*rhs_p)) *rhs_p = get_formal_tmp_var (*rhs_p, body_p); /* Once lowered, extract the bounds and clauses. */ extract_omp_for_data (stmt, &fd); lower_omp_for_lastprivate (&fd, body_p, &dlist, ctx); append_to_statement_list (stmt, body_p); append_to_statement_list (OMP_FOR_BODY (stmt), body_p); t = make_node (OMP_CONTINUE); append_to_statement_list (t, body_p); /* After the loop, add exit clauses. */ lower_reduction_clauses (OMP_FOR_CLAUSES (stmt), body_p, ctx); append_to_statement_list (dlist, body_p); maybe_catch_exception (body_p); /* Region exit marker goes at the end of the loop body. */ t = make_node (OMP_RETURN); OMP_RETURN_NOWAIT (t) = fd.have_nowait; append_to_statement_list (t, body_p); pop_gimplify_context (NULL_TREE); record_vars_into (ctx->block_vars, ctx->cb.dst_fn); OMP_FOR_BODY (stmt) = NULL_TREE; OMP_FOR_PRE_BODY (stmt) = NULL_TREE; *stmt_p = new_stmt; } /* Callback for walk_stmts. Check if *TP only contains OMP_FOR or OMP_PARALLEL. */ static tree check_combined_parallel (tree *tp, int *walk_subtrees, void *data) { struct walk_stmt_info *wi = data; int *info = wi->info; *walk_subtrees = 0; switch (TREE_CODE (*tp)) { case OMP_FOR: case OMP_SECTIONS: *info = *info == 0 ? 1 : -1; break; default: *info = -1; break; } return NULL; } /* Lower the OpenMP parallel directive in *STMT_P. CTX holds context information for the directive. 
*/ static void lower_omp_parallel (tree *stmt_p, omp_context *ctx) { tree clauses, par_bind, par_body, new_body, bind; tree olist, ilist, par_olist, par_ilist; tree stmt, child_fn, t; stmt = *stmt_p; clauses = OMP_PARALLEL_CLAUSES (stmt); par_bind = OMP_PARALLEL_BODY (stmt); par_body = BIND_EXPR_BODY (par_bind); child_fn = ctx->cb.dst_fn; if (!OMP_PARALLEL_COMBINED (stmt)) { struct walk_stmt_info wi; int ws_num = 0; memset (&wi, 0, sizeof (wi)); wi.callback = check_combined_parallel; wi.info = &ws_num; wi.val_only = true; walk_stmts (&wi, &par_bind); if (ws_num == 1) OMP_PARALLEL_COMBINED (stmt) = 1; } push_gimplify_context (); par_olist = NULL_TREE; par_ilist = NULL_TREE; lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx); lower_omp (&par_body, ctx); lower_reduction_clauses (clauses, &par_olist, ctx); /* Declare all the variables created by mapping and the variables declared in the scope of the parallel body. */ record_vars_into (ctx->block_vars, child_fn); record_vars_into (BIND_EXPR_VARS (par_bind), child_fn); if (ctx->record_type) { ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_data_o"); OMP_PARALLEL_DATA_ARG (stmt) = ctx->sender_decl; } olist = NULL_TREE; ilist = NULL_TREE; lower_send_clauses (clauses, &ilist, &olist, ctx); lower_send_shared_vars (&ilist, &olist, ctx); /* Once all the expansions are done, sequence all the different fragments inside OMP_PARALLEL_BODY. */ bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL); append_to_statement_list (ilist, &BIND_EXPR_BODY (bind)); new_body = alloc_stmt_list (); if (ctx->record_type) { t = build_fold_addr_expr (ctx->sender_decl); /* fixup_child_record_type might have changed receiver_decl's type. */ t = fold_convert (TREE_TYPE (ctx->receiver_decl), t); t = build2 (MODIFY_EXPR, void_type_node, ctx->receiver_decl, t); append_to_statement_list (t, &new_body); } append_to_statement_list (par_ilist, &new_body); append_to_statement_list (par_body, &new_body); append_to_statement_list (par_olist, &new_body); maybe_catch_exception (&new_body); t = make_node (OMP_RETURN); append_to_statement_list (t, &new_body); OMP_PARALLEL_BODY (stmt) = new_body; append_to_statement_list (stmt, &BIND_EXPR_BODY (bind)); append_to_statement_list (olist, &BIND_EXPR_BODY (bind)); *stmt_p = bind; pop_gimplify_context (NULL_TREE); } /* Pass *TP back through the gimplifier within the context determined by WI. This handles replacement of DECL_VALUE_EXPR, as well as adjusting the flags on ADDR_EXPR. */ static void lower_regimplify (tree *tp, struct walk_stmt_info *wi) { enum gimplify_status gs; tree pre = NULL; if (wi->is_lhs) gs = gimplify_expr (tp, &pre, NULL, is_gimple_lvalue, fb_lvalue); else if (wi->val_only) gs = gimplify_expr (tp, &pre, NULL, is_gimple_val, fb_rvalue); else gs = gimplify_expr (tp, &pre, NULL, is_gimple_formal_tmp_var, fb_rvalue); gcc_assert (gs == GS_ALL_DONE); if (pre) tsi_link_before (&wi->tsi, pre, TSI_SAME_STMT); } /* Copy EXP into a temporary. Insert the initialization statement before TSI. */ static tree init_tmp_var (tree exp, tree_stmt_iterator *tsi) { tree t, stmt; t = create_tmp_var (TREE_TYPE (exp), NULL); if (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE) DECL_COMPLEX_GIMPLE_REG_P (t) = 1; stmt = build2 (MODIFY_EXPR, TREE_TYPE (t), t, exp); SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi))); tsi_link_before (tsi, stmt, TSI_SAME_STMT); return t; } /* Similarly, but copy from the temporary and insert the statement after the iterator. 
*/ static tree save_tmp_var (tree exp, tree_stmt_iterator *tsi) { tree t, stmt; t = create_tmp_var (TREE_TYPE (exp), NULL); if (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE) DECL_COMPLEX_GIMPLE_REG_P (t) = 1; stmt = build2 (MODIFY_EXPR, TREE_TYPE (t), exp, t); SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi))); tsi_link_after (tsi, stmt, TSI_SAME_STMT); return t; } /* Callback for walk_stmts. Lower the OpenMP directive pointed by TP. */ static tree lower_omp_1 (tree *tp, int *walk_subtrees, void *data) { struct walk_stmt_info *wi = data; omp_context *ctx = wi->info; tree t = *tp; /* If we have issued syntax errors, avoid doing any heavy lifting. Just replace the OpenMP directives with a NOP to avoid confusing RTL expansion. */ if (errorcount && OMP_DIRECTIVE_P (*tp)) { *tp = build_empty_stmt (); return NULL_TREE; } *walk_subtrees = 0; switch (TREE_CODE (*tp)) { case OMP_PARALLEL: ctx = maybe_lookup_ctx (t); lower_omp_parallel (tp, ctx); break; case OMP_FOR: ctx = maybe_lookup_ctx (t); gcc_assert (ctx); lower_omp_for (tp, ctx); break; case OMP_SECTIONS: ctx = maybe_lookup_ctx (t); gcc_assert (ctx); lower_omp_sections (tp, ctx); break; case OMP_SINGLE: ctx = maybe_lookup_ctx (t); gcc_assert (ctx); lower_omp_single (tp, ctx); break; case OMP_MASTER: ctx = maybe_lookup_ctx (t); gcc_assert (ctx); lower_omp_master (tp, ctx); break; case OMP_ORDERED: ctx = maybe_lookup_ctx (t); gcc_assert (ctx); lower_omp_ordered (tp, ctx); break; case OMP_CRITICAL: ctx = maybe_lookup_ctx (t); gcc_assert (ctx); lower_omp_critical (tp, ctx); break; case VAR_DECL: if (ctx && DECL_HAS_VALUE_EXPR_P (t)) { lower_regimplify (&t, wi); if (wi->val_only) { if (wi->is_lhs) t = save_tmp_var (t, &wi->tsi); else t = init_tmp_var (t, &wi->tsi); } *tp = t; } break; case ADDR_EXPR: if (ctx) lower_regimplify (tp, wi); break; case ARRAY_REF: case ARRAY_RANGE_REF: case REALPART_EXPR: case IMAGPART_EXPR: case COMPONENT_REF: case VIEW_CONVERT_EXPR: if (ctx) lower_regimplify (tp, wi); break; case INDIRECT_REF: if (ctx) { wi->is_lhs = false; wi->val_only = true; lower_regimplify (&TREE_OPERAND (t, 0), wi); } break; default: if (!TYPE_P (t) && !DECL_P (t)) *walk_subtrees = 1; break; } return NULL_TREE; } static void lower_omp (tree *stmt_p, omp_context *ctx) { struct walk_stmt_info wi; memset (&wi, 0, sizeof (wi)); wi.callback = lower_omp_1; wi.info = ctx; wi.val_only = true; wi.want_locations = true; walk_stmts (&wi, stmt_p); } /* Main entry point. */ static unsigned int execute_lower_omp (void) { all_contexts = splay_tree_new (splay_tree_compare_pointers, 0, delete_omp_context); scan_omp (&DECL_SAVED_TREE (current_function_decl), NULL); gcc_assert (parallel_nesting_level == 0); if (all_contexts->root) lower_omp (&DECL_SAVED_TREE (current_function_decl), NULL); if (all_contexts) { splay_tree_delete (all_contexts); all_contexts = NULL; } return 0; } static bool gate_lower_omp (void) { return flag_openmp != 0; } struct tree_opt_pass pass_lower_omp = { "omplower", /* name */ gate_lower_omp, /* gate */ execute_lower_omp, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ PROP_gimple_any, /* properties_required */ PROP_gimple_lomp, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func, /* todo_flags_finish */ 0 /* letter */ }; /* The following is a utility to diagnose OpenMP structured block violations. It is not part of the "omplower" pass, as that's invoked too late. It should be invoked by the respective front ends after gimplification. 
*/ static splay_tree all_labels; /* Check for mismatched contexts and generate an error if needed. Return true if an error is detected. */ static bool diagnose_sb_0 (tree *stmt_p, tree branch_ctx, tree label_ctx) { bool exit_p = true; if ((label_ctx ? TREE_VALUE (label_ctx) : NULL) == branch_ctx) return false; /* Try to avoid confusing the user by producing and error message with correct "exit" or "enter" verbage. We prefer "exit" unless we can show that LABEL_CTX is nested within BRANCH_CTX. */ if (branch_ctx == NULL) exit_p = false; else { while (label_ctx) { if (TREE_VALUE (label_ctx) == branch_ctx) { exit_p = false; break; } label_ctx = TREE_CHAIN (label_ctx); } } if (exit_p) error ("invalid exit from OpenMP structured block"); else error ("invalid entry to OpenMP structured block"); *stmt_p = build_empty_stmt (); return true; } /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record where in the tree each label is found. */ static tree diagnose_sb_1 (tree *tp, int *walk_subtrees, void *data) { struct walk_stmt_info *wi = data; tree context = (tree) wi->info; tree inner_context; tree t = *tp; *walk_subtrees = 0; switch (TREE_CODE (t)) { case OMP_PARALLEL: case OMP_SECTIONS: case OMP_SINGLE: walk_tree (&OMP_CLAUSES (t), diagnose_sb_1, wi, NULL); /* FALLTHRU */ case OMP_SECTION: case OMP_MASTER: case OMP_ORDERED: case OMP_CRITICAL: /* The minimal context here is just a tree of statements. */ inner_context = tree_cons (NULL, t, context); wi->info = inner_context; walk_stmts (wi, &OMP_BODY (t)); wi->info = context; break; case OMP_FOR: walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_1, wi, NULL); inner_context = tree_cons (NULL, t, context); wi->info = inner_context; walk_tree (&OMP_FOR_INIT (t), diagnose_sb_1, wi, NULL); walk_tree (&OMP_FOR_COND (t), diagnose_sb_1, wi, NULL); walk_tree (&OMP_FOR_INCR (t), diagnose_sb_1, wi, NULL); walk_stmts (wi, &OMP_FOR_PRE_BODY (t)); walk_stmts (wi, &OMP_FOR_BODY (t)); wi->info = context; break; case LABEL_EXPR: splay_tree_insert (all_labels, (splay_tree_key) LABEL_EXPR_LABEL (t), (splay_tree_value) context); break; default: break; } return NULL_TREE; } /* Pass 2: Check each branch and see if its context differs from that of the destination label's context. */ static tree diagnose_sb_2 (tree *tp, int *walk_subtrees, void *data) { struct walk_stmt_info *wi = data; tree context = (tree) wi->info; splay_tree_node n; tree t = *tp; *walk_subtrees = 0; switch (TREE_CODE (t)) { case OMP_PARALLEL: case OMP_SECTIONS: case OMP_SINGLE: walk_tree (&OMP_CLAUSES (t), diagnose_sb_2, wi, NULL); /* FALLTHRU */ case OMP_SECTION: case OMP_MASTER: case OMP_ORDERED: case OMP_CRITICAL: wi->info = t; walk_stmts (wi, &OMP_BODY (t)); wi->info = context; break; case OMP_FOR: walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_2, wi, NULL); wi->info = t; walk_tree (&OMP_FOR_INIT (t), diagnose_sb_2, wi, NULL); walk_tree (&OMP_FOR_COND (t), diagnose_sb_2, wi, NULL); walk_tree (&OMP_FOR_INCR (t), diagnose_sb_2, wi, NULL); walk_stmts (wi, &OMP_FOR_PRE_BODY (t)); walk_stmts (wi, &OMP_FOR_BODY (t)); wi->info = context; break; case GOTO_EXPR: { tree lab = GOTO_DESTINATION (t); if (TREE_CODE (lab) != LABEL_DECL) break; n = splay_tree_lookup (all_labels, (splay_tree_key) lab); diagnose_sb_0 (tp, context, n ? 
(tree) n->value : NULL_TREE); } break; case SWITCH_EXPR: { tree vec = SWITCH_LABELS (t); int i, len = TREE_VEC_LENGTH (vec); for (i = 0; i < len; ++i) { tree lab = CASE_LABEL (TREE_VEC_ELT (vec, i)); n = splay_tree_lookup (all_labels, (splay_tree_key) lab); if (diagnose_sb_0 (tp, context, (tree) n->value)) break; } } break; case RETURN_EXPR: diagnose_sb_0 (tp, context, NULL_TREE); break; default: break; } return NULL_TREE; } void diagnose_omp_structured_block_errors (tree fndecl) { tree save_current = current_function_decl; struct walk_stmt_info wi; current_function_decl = fndecl; all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0); memset (&wi, 0, sizeof (wi)); wi.callback = diagnose_sb_1; walk_stmts (&wi, &DECL_SAVED_TREE (fndecl)); memset (&wi, 0, sizeof (wi)); wi.callback = diagnose_sb_2; wi.want_locations = true; wi.want_return_expr = true; walk_stmts (&wi, &DECL_SAVED_TREE (fndecl)); splay_tree_delete (all_labels); all_labels = NULL; current_function_decl = save_current; } #include "gt-omp-low.h"
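The comments in expand_omp_for_static_nochunk above give the iteration-space partitioning only as pseudocode. Below is a minimal, self-contained C sketch of that same arithmetic, added here purely for illustration: the helper name static_nochunk_range and the driver are hypothetical and are not part of GCC or libgomp.

/* Sketch of the static (no chunk) partitioning described above:
   n  = (adj + N2 - N1) / STEP;   q = n / nthreads;
   q += (q * nthreads != n);      s0 = q * threadid;   e0 = min(s0 + q, n).  */
#include <stdio.h>

static void
static_nochunk_range (long n1, long n2, long step, int cond_lt,
                      long nthreads, long threadid, long *s0, long *e0)
{
  long adj = cond_lt ? step - 1 : step + 1;
  long n = (adj + n2 - n1) / step;   /* total logical iteration count */
  long q = n / nthreads;
  q += (q * nthreads != n);          /* round up so every iteration is covered */
  *s0 = q * threadid;
  *e0 = *s0 + q < n ? *s0 + q : n;   /* e0 = min (s0 + q, n) */
}

int
main (void)
{
  long n1 = 0, n2 = 10, step = 3;    /* for (V = 0; V < 10; V += 3): 4 iterations */
  long nthreads = 3;

  for (long tid = 0; tid < nthreads; tid++)
    {
      long s0, e0;
      static_nochunk_range (n1, n2, step, /*cond_lt=*/1, nthreads, tid, &s0, &e0);
      if (s0 >= e0)
        continue;                    /* this thread gets no work (the L2 branch) */
      long v = s0 * step + n1;       /* V = s0 * STEP + N1 */
      long e = e0 * step + n1;       /* e = e0 * STEP + N1 */
      for (; v < e; v += step)       /* the sequential body loop (L1) */
        printf ("thread %ld runs V=%ld\n", tid, v);
    }
  return 0;
}

For this example the sketch assigns iterations {0, 3} to thread 0 and {6, 9} to thread 1, while thread 2 takes the s0 >= e0 early exit, matching the control flow the expansion pass generates.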
TM_to_I8.c
//START_INCLUDES
#include "q_incs.h"
//STOP_INCLUDES
#include "TM_to_I8.h"
//START_FUNC_DECL
int TM_to_I8(
    struct tm *inv,
    uint64_t n_in,
    int64_t *outv
    )
//STOP_FUNC_DECL
{
  int status = 0;
  if ( inv == NULL ) { go_BYE(-1); }
  if ( outv == NULL ) { go_BYE(-1); }
  if ( n_in == 0 ) { go_BYE(-1); }
  // TODO
#pragma omp parallel for schedule(static, 1024)
  for ( uint64_t i = 0; i < n_in; i++ ) {
    /* mktime() may normalize its argument, so work on a private copy */
    struct tm ltm = inv[i];
    time_t tval = mktime(&ltm);
    /* seconds since the Unix epoch, with the fields read as local time */
    outv[i] = (int64_t)tval;
  }
  cBYE(status);
BYE:
  return status;
}
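A hypothetical driver for TM_to_I8, not part of the original file: it needs only the prototype and is linked against TM_to_I8.c (which in turn relies on the Q error macros go_BYE/cBYE from q_incs.h). It converts two broken-down local times to epoch seconds and prints them.

/* Hypothetical standalone driver for TM_to_I8; compile and link with TM_to_I8.c. */
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <stdint.h>

int TM_to_I8(struct tm *inv, uint64_t n_in, int64_t *outv);

int
main(void)
{
  struct tm inv[2];
  int64_t outv[2];

  memset(inv, 0, sizeof(inv));
  /* 2000-01-01 00:00:00 and 2000-01-02 00:00:00, local time */
  inv[0].tm_year = 100; inv[0].tm_mon = 0; inv[0].tm_mday = 1; inv[0].tm_isdst = -1;
  inv[1] = inv[0]; inv[1].tm_mday = 2;

  if ( TM_to_I8(inv, 2, outv) != 0 ) {
    fprintf(stderr, "TM_to_I8 failed\n");
    return 1;
  }
  printf("%lld %lld\n", (long long) outv[0], (long long) outv[1]);
  return 0;
}

Because mktime interprets the fields as local time, the printed values depend on the TZ setting in effect when the driver runs.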
quantize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE % % Q Q U U A A NN N T I ZZ E % % Q Q U U AAAAA N N N T I ZZZ EEEEE % % Q QQ U U A A N NN T I ZZ E % % QQQQ UUU A A N N T IIIII ZZZZZ EEEEE % % % % % % MagickCore Methods to Reduce the Number of Unique Colors in an Image % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Realism in computer graphics typically requires using 24 bits/pixel to % generate an image. Yet many graphic display devices do not contain the % amount of memory necessary to match the spatial and color resolution of % the human eye. The Quantize methods takes a 24 bit image and reduces % the number of colors so it can be displayed on raster device with less % bits per pixel. In most instances, the quantized image closely % resembles the original reference image. % % A reduction of colors in an image is also desirable for image % transmission and real-time animation. % % QuantizeImage() takes a standard RGB or monochrome images and quantizes % them down to some fixed number of colors. % % For purposes of color allocation, an image is a set of n pixels, where % each pixel is a point in RGB space. RGB space is a 3-dimensional % vector space, and each pixel, Pi, is defined by an ordered triple of % red, green, and blue coordinates, (Ri, Gi, Bi). % % Each primary color component (red, green, or blue) represents an % intensity which varies linearly from 0 to a maximum value, Cmax, which % corresponds to full saturation of that color. Color allocation is % defined over a domain consisting of the cube in RGB space with opposite % vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax = % 255. % % The algorithm maps this domain onto a tree in which each node % represents a cube within that domain. In the following discussion % these cubes are defined by the coordinate of two opposite vertices (vertex % nearest the origin in RGB space and the vertex farthest from the origin). % % The tree's root node represents the entire domain, (0,0,0) through % (Cmax,Cmax,Cmax). Each lower level in the tree is generated by % subdividing one node's cube into eight smaller cubes of equal size. % This corresponds to bisecting the parent cube with planes passing % through the midpoints of each edge. % % The basic algorithm operates in three phases: Classification, % Reduction, and Assignment. Classification builds a color description % tree for the image. Reduction collapses the tree until the number it % represents, at most, the number of colors desired in the output image. % Assignment defines the output image's color map and sets each pixel's % color by restorage_class in the reduced tree. 
Our goal is to minimize % the numerical discrepancies between the original colors and quantized % colors (quantization error). % % Classification begins by initializing a color description tree of % sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color description % tree in the storage_class phase for realistic values of Cmax. If % colors components in the input image are quantized to k-bit precision, % so that Cmax= 2k-1, the tree would need k levels below the root node to % allow representing each possible input color in a leaf. This becomes % prohibitive because the tree's total number of nodes is 1 + % sum(i=1, k, 8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing the pixel's color. It updates the following data for each % such node: % % n1: Number of pixels whose color is contained in the RGB cube which % this node represents; % % n2: Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb: Sums of the red, green, and blue component values for all % pixels not classified at a lower depth. The combination of these sums % and n2 will ultimately characterize the mean color of a set of % pixels represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the % quantization error for a node. % % Reduction repeatedly prunes the tree until the number of nodes with n2 % > 0 is less than or equal to the maximum number of colors allowed in % the output image. On any given iteration over the tree, it selects % those nodes whose E count is minimal for pruning and merges their color % statistics upward. It uses a pruning threshold, Ep, to govern node % selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors within % the cubic volume which the node represents. 
This includes n1 - n2 % pixels whose colors should be defined by nodes at a lower level in the % tree. % % Assignment generates the output image from the pruned tree. The output % image consists of two parts: (1) A color map, which is an array of % color descriptions (RGB triples) for each color present in the output % image; (2) A pixel array, which represents each pixel as an index % into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. % % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % This method is based on a similar algorithm written by Paul Raveling. % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/histogram.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/thread-private.h" /* Define declarations. */ #if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE) #define CacheShift 2 #else #define CacheShift 3 #endif #define ErrorQueueLength 16 #define MaxNodes 266817 #define MaxTreeDepth 8 #define NodesInAList 1920 /* Typdef declarations. */ typedef struct _NodeInfo { struct _NodeInfo *parent, *child[16]; MagickSizeType number_unique; DoublePixelPacket total_color; MagickRealType quantize_error; size_t color_number, id, level; } NodeInfo; typedef struct _Nodes { NodeInfo *nodes; struct _Nodes *next; } Nodes; typedef struct _CubeInfo { NodeInfo *root; size_t colors, maximum_colors; ssize_t transparent_index; MagickSizeType transparent_pixels; DoublePixelPacket target; MagickRealType distance, pruning_threshold, next_threshold; size_t nodes, free_nodes, color_number; NodeInfo *next_node; Nodes *node_queue; MemoryInfo *memory_info; ssize_t *cache; DoublePixelPacket error[ErrorQueueLength]; MagickRealType weights[ErrorQueueLength]; QuantizeInfo *quantize_info; MagickBooleanType associate_alpha; ssize_t x, y; size_t depth; MagickOffsetType offset; MagickSizeType span; } CubeInfo; /* Method prototypes. 
*/ static CubeInfo *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t); static NodeInfo *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *); static MagickBooleanType AssignImageColors(Image *,CubeInfo *), ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *), DitherImage(Image *,CubeInfo *), SetGrayscaleImage(Image *); static size_t DefineImageColormap(Image *,CubeInfo *,NodeInfo *); static void ClosestColor(const Image *,CubeInfo *,const NodeInfo *), DestroyCubeInfo(CubeInfo *), PruneLevel(CubeInfo *,const NodeInfo *), PruneToCubeDepth(CubeInfo *,const NodeInfo *), ReduceImageColors(const Image *,CubeInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireQuantizeInfo() allocates the QuantizeInfo structure. % % The format of the AcquireQuantizeInfo method is: % % QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info) { QuantizeInfo *quantize_info; quantize_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*quantize_info)); if (quantize_info == (QuantizeInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); GetQuantizeInfo(quantize_info); if (image_info != (ImageInfo *) NULL) { const char *option; quantize_info->dither=image_info->dither; option=GetImageOption(image_info,"dither"); if (option != (const char *) NULL) quantize_info->dither_method=(DitherMethod) ParseCommandOption( MagickDitherOptions,MagickFalse,option); quantize_info->measure_error=image_info->verbose; } return(quantize_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A s s i g n I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AssignImageColors() generates the output image from the pruned tree. The % output image consists of two parts: (1) A color map, which is an array % of color descriptions (RGB triples) for each color present in the % output image; (2) A pixel array, which represents each pixel as an % index into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. % % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % The format of the AssignImageColors() method is: % % MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. 
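%
%  As a rough illustration of the two passes described above (pseudocode
%  only; the implementation batches runs of identical pixels and also
%  handles an associated alpha channel):
%
%    for each node with n2 > 0:
%      colormap[next_index++] = (Sr/n2, Sg/n2, Sb/n2)
%    for each pixel:
%      descend the tree to the deepest node containing the pixel's color
%      assign the pixel the colormap index of the closest color found
%        among that node's parent, its siblings, and their children
%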
% */ static inline void AssociateAlphaPixel(const CubeInfo *cube_info, const PixelPacket *pixel,DoublePixelPacket *alpha_pixel) { MagickRealType alpha; alpha_pixel->index=0; if ((cube_info->associate_alpha == MagickFalse) || (pixel->opacity == OpaqueOpacity)) { alpha_pixel->red=(MagickRealType) GetPixelRed(pixel); alpha_pixel->green=(MagickRealType) GetPixelGreen(pixel); alpha_pixel->blue=(MagickRealType) GetPixelBlue(pixel); alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel); return; } alpha=(MagickRealType) (QuantumScale*(QuantumRange-GetPixelOpacity(pixel))); alpha_pixel->red=alpha*GetPixelRed(pixel); alpha_pixel->green=alpha*GetPixelGreen(pixel); alpha_pixel->blue=alpha*GetPixelBlue(pixel); alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel); } static inline size_t ColorToNodeId(const CubeInfo *cube_info, const DoublePixelPacket *pixel,size_t index) { size_t id; id=(size_t) (((ScaleQuantumToChar(ClampPixel(GetPixelRed(pixel))) >> index) & 0x01) | ((ScaleQuantumToChar(ClampPixel(GetPixelGreen(pixel))) >> index) & 0x01) << 1 | ((ScaleQuantumToChar(ClampPixel(GetPixelBlue(pixel))) >> index) & 0x01) << 2); if (cube_info->associate_alpha != MagickFalse) id|=((ScaleQuantumToChar(ClampPixel(GetPixelOpacity(pixel))) >> index) & 0x1) << 3; return(id); } static inline MagickBooleanType IsSameColor(const Image *image, const PixelPacket *p,const PixelPacket *q) { if ((GetPixelRed(p) != GetPixelRed(q)) || (GetPixelGreen(p) != GetPixelGreen(q)) || (GetPixelBlue(p) != GetPixelBlue(q))) return(MagickFalse); if ((image->matte != MagickFalse) && (GetPixelOpacity(p) != GetPixelOpacity(q))) return(MagickFalse); return(MagickTrue); } static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info) { #define AssignImageTag "Assign/Image" ssize_t y; /* Allocate image colormap. */ if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace); else if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace); if (AcquireImageColormap(image,cube_info->colors) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); image->colors=0; cube_info->transparent_pixels=0; cube_info->transparent_index=(-1); (void) DefineImageColormap(image,cube_info,cube_info->root); /* Create a reduced color image. 
*/ if ((cube_info->quantize_info->dither != MagickFalse) && (cube_info->quantize_info->dither_method != NoDitherMethod)) (void) DitherImage(image,cube_info); else { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { CubeInfo cube; register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; ssize_t count; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); cube=(*cube_info); for (x=0; x < (ssize_t) image->columns; x+=count) { DoublePixelPacket pixel; register const NodeInfo *node_info; register ssize_t i; size_t id, index; /* Identify the deepest node containing the pixel's color. */ for (count=1; (x+count) < (ssize_t) image->columns; count++) if (IsSameColor(image,q,q+count) == MagickFalse) break; AssociateAlphaPixel(&cube,q,&pixel); node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)* (QuantumRange+1.0)+1.0); ClosestColor(image,&cube,node_info->parent); index=cube.color_number; for (i=0; i < (ssize_t) count; i++) { if (image->storage_class == PseudoClass) SetPixelIndex(indexes+x+i,index); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRgb(q,image->colormap+index); if (cube.associate_alpha != MagickFalse) SetPixelOpacity(q,image->colormap[index].opacity); } q++; } } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_AssignImageColors) #endif proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); } if (cube_info->quantize_info->measure_error != MagickFalse) (void) GetImageQuantizeError(image); if ((cube_info->quantize_info->number_colors == 2) && (cube_info->quantize_info->colorspace == GRAYColorspace)) { double intensity; /* Monochrome image. 
*/ intensity=0.0; if ((image->colors > 1) && (GetPixelLuma(image,image->colormap+0) > GetPixelLuma(image,image->colormap+1))) intensity=(double) QuantumRange; image->colormap[0].red=intensity; image->colormap[0].green=intensity; image->colormap[0].blue=intensity; if (image->colors > 1) { image->colormap[1].red=(double) QuantumRange-intensity; image->colormap[1].green=(double) QuantumRange-intensity; image->colormap[1].blue=(double) QuantumRange-intensity; } } (void) SyncImage(image); if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image,sRGBColorspace); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l a s s i f y I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClassifyImageColors() begins by initializing a color description tree % of sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color % description tree in the storage_class phase for realistic values of % Cmax. If colors components in the input image are quantized to k-bit % precision, so that Cmax= 2k-1, the tree would need k levels below the % root node to allow representing each possible input color in a leaf. % This becomes prohibitive because the tree's total number of nodes is % 1 + sum(i=1,k,8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing It updates the following data for each such node: % % n1 : Number of pixels whose color is contained in the RGB cube % which this node represents; % % n2 : Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb : Sums of the red, green, and blue component values for % all pixels not classified at a lower depth. The combination of % these sums and n2 will ultimately characterize the mean color of a % set of pixels represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the quantization % error for a node. % % The format of the ClassifyImageColors() method is: % % MagickBooleanType ClassifyImageColors(CubeInfo *cube_info, % const Image *image,ExceptionInfo *exception) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o image: the image. 
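%
%  For illustration, the child followed at each level of the descent is
%  indexed by one bit taken from each color channel (plus one from the
%  alpha channel when alpha is associated), roughly:
%
%    id = ((red   >> index) & 1)
%       | ((green >> index) & 1) << 1
%       | ((blue  >> index) & 1) << 2
%
%  where the channel values are first scaled to 8-bit precision and index
%  moves from the most significant bit toward the least as the descent
%  deepens.
%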
% */ static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info) { MagickBooleanType associate_alpha; associate_alpha=image->matte; if ((cube_info->quantize_info->number_colors == 2) && (cube_info->quantize_info->colorspace == GRAYColorspace)) associate_alpha=MagickFalse; cube_info->associate_alpha=associate_alpha; } static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info, const Image *image,ExceptionInfo *exception) { #define ClassifyImageTag "Classify/Image" CacheView *image_view; DoublePixelPacket error, mid, midpoint, pixel; MagickBooleanType proceed; MagickRealType bisect; NodeInfo *node_info; size_t count, id, index, level; ssize_t y; /* Classify the first cube_info->maximum_colors colors to a tree depth of 8. */ SetAssociatedAlpha(image,cube_info); if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image, cube_info->quantize_info->colorspace); else if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) TransformImageColorspace((Image *) image,sRGBColorspace); midpoint.red=(MagickRealType) QuantumRange/2.0; midpoint.green=(MagickRealType) QuantumRange/2.0; midpoint.blue=(MagickRealType) QuantumRange/2.0; midpoint.opacity=(MagickRealType) QuantumRange/2.0; error.opacity=0.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; if (cube_info->nodes > MaxNodes) { /* Prune one level if the color tree is too large. */ PruneLevel(cube_info,cube_info->root); cube_info->depth--; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count) { /* Start at the root and descend the color cube tree. */ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) if (IsSameColor(image,p,p+count) == MagickFalse) break; AssociateAlphaPixel(cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((MagickRealType) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= MaxTreeDepth; level++) { double distance; bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.opacity+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. */ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); continue; } if (level == MaxTreeDepth) cube_info->colors++; } /* Approximate the quantization error represented by this node. 
*/ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.opacity=QuantumScale*(pixel.opacity-mid.opacity); distance=(double) (error.red*error.red+error.green*error.green+ error.blue*error.blue+error.opacity*error.opacity); if (IsNaN(distance)) distance=0.0; node_info->quantize_error+=count*sqrt(distance); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. */ node_info->number_unique+=count; node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red); node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green); node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) node_info->total_color.opacity+=count*QuantumScale* ClampPixel(pixel.opacity); else node_info->total_color.opacity+=count*QuantumScale* ClampPixel(OpaqueOpacity); p+=count; } if (cube_info->colors > cube_info->maximum_colors) { PruneToCubeDepth(cube_info,cube_info->root); break; } proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } for (y++; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; if (cube_info->nodes > MaxNodes) { /* Prune one level if the color tree is too large. */ PruneLevel(cube_info,cube_info->root); cube_info->depth--; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count) { /* Start at the root and descend the color cube tree. */ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) if (IsSameColor(image,p,p+count) == MagickFalse) break; AssociateAlphaPixel(cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((MagickRealType) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= cube_info->depth; level++) { double distance; bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.opacity+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. */ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","%s", image->filename); continue; } if (level == cube_info->depth) cube_info->colors++; } /* Approximate the quantization error represented by this node. */ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.opacity=QuantumScale*(pixel.opacity-mid.opacity); distance=(double) (error.red*error.red+error.green*error.green+ error.blue*error.blue+error.opacity*error.opacity); if (IsNaN(distance)) distance=0.0; node_info->quantize_error+=count*sqrt(distance); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. 
*/ node_info->number_unique+=count; node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red); node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green); node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) node_info->total_color.opacity+=count*QuantumScale*ClampPixel( pixel.opacity); else node_info->total_color.opacity+=count*QuantumScale* ClampPixel(OpaqueOpacity); p+=count; } proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } image_view=DestroyCacheView(image_view); if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image,sRGBColorspace); return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneQuantizeInfo() makes a duplicate of the given quantize info structure, % or if quantize info is NULL, a new one. % % The format of the CloneQuantizeInfo method is: % % QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o clone_info: Method CloneQuantizeInfo returns a duplicate of the given % quantize info, or if image info is NULL a new one. % % o quantize_info: a structure of type info. % */ MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info) { QuantizeInfo *clone_info; clone_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*clone_info)); if (clone_info == (QuantizeInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); GetQuantizeInfo(clone_info); if (quantize_info == (QuantizeInfo *) NULL) return(clone_info); clone_info->number_colors=quantize_info->number_colors; clone_info->tree_depth=quantize_info->tree_depth; clone_info->dither=quantize_info->dither; clone_info->dither_method=quantize_info->dither_method; clone_info->colorspace=quantize_info->colorspace; clone_info->measure_error=quantize_info->measure_error; return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o s e s t C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClosestColor() traverses the color cube tree at a particular node and % determines which colormap entry best represents the input color. % % The format of the ClosestColor method is: % % void ClosestColor(const Image *image,CubeInfo *cube_info, % const NodeInfo *node_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % % o node_info: the address of a structure of type NodeInfo which points to a % node in the color cube tree that is to be pruned. % */ static void ClosestColor(const Image *image,CubeInfo *cube_info, const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 
8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) ClosestColor(image,cube_info,node_info->child[i]); if (node_info->number_unique != 0) { MagickRealType pixel; register DoublePixelPacket *magick_restrict q; register MagickRealType alpha, beta, distance; register PixelPacket *magick_restrict p; /* Determine if this color is "closest". */ p=image->colormap+node_info->color_number; q=(&cube_info->target); alpha=1.0; beta=1.0; if (cube_info->associate_alpha != MagickFalse) { alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p)); beta=(MagickRealType) (QuantumScale*GetPixelAlpha(q)); } pixel=alpha*GetPixelRed(p)-beta*GetPixelRed(q); distance=pixel*pixel; if (distance <= cube_info->distance) { pixel=alpha*GetPixelGreen(p)-beta*GetPixelGreen(q); distance+=pixel*pixel; if (distance <= cube_info->distance) { pixel=alpha*GetPixelBlue(p)-beta*GetPixelBlue(q); distance+=pixel*pixel; if (distance <= cube_info->distance) { if (cube_info->associate_alpha != MagickFalse) { pixel=GetPixelAlpha(p)-GetPixelAlpha(q); distance+=pixel*pixel; } if (distance <= cube_info->distance) { cube_info->distance=distance; cube_info->color_number=node_info->color_number; } } } } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p r e s s I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompressImageColormap() compresses an image colormap by removing any % duplicate or unused color entries. % % The format of the CompressImageColormap method is: % % MagickBooleanType CompressImageColormap(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType CompressImageColormap(Image *image) { QuantizeInfo quantize_info; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (IsPaletteImage(image,&image->exception) == MagickFalse) return(MagickFalse); GetQuantizeInfo(&quantize_info); quantize_info.number_colors=image->colors; quantize_info.tree_depth=MaxTreeDepth; return(QuantizeImage(&quantize_info,image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e f i n e I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DefineImageColormap() traverses the color cube tree and notes each colormap % entry. A colormap entry is any node in the color cube tree where the % of unique colors is not zero. DefineImageColormap() returns the number of % colors in the image colormap. % % The format of the DefineImageColormap method is: % % size_t DefineImageColormap(Image *image,CubeInfo *cube_info, % NodeInfo *node_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % % o node_info: the address of a structure of type NodeInfo which points to a % node in the color cube tree that is to be pruned. % */ static size_t DefineImageColormap(Image *image,CubeInfo *cube_info, NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 
8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) (void) DefineImageColormap(image,cube_info,node_info->child[i]); if (node_info->number_unique != 0) { register MagickRealType alpha; register PixelPacket *magick_restrict q; /* Colormap entry is defined by the mean color in this cube. */ q=image->colormap+image->colors; alpha=(MagickRealType) ((MagickOffsetType) node_info->number_unique); alpha=PerceptibleReciprocal(alpha); if (cube_info->associate_alpha == MagickFalse) { SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha* QuantumRange*node_info->total_color.red))); SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha* QuantumRange*node_info->total_color.green))); SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha* QuantumRange*node_info->total_color.blue))); SetPixelOpacity(q,OpaqueOpacity); } else { MagickRealType opacity; opacity=(MagickRealType) (alpha*QuantumRange* node_info->total_color.opacity); SetPixelOpacity(q,ClampToQuantum(opacity)); if (q->opacity == OpaqueOpacity) { SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha* QuantumRange*node_info->total_color.red))); SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha* QuantumRange*node_info->total_color.green))); SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha* QuantumRange*node_info->total_color.blue))); } else { double gamma; gamma=(double) (QuantumScale*(QuantumRange-(double) q->opacity)); gamma=PerceptibleReciprocal(gamma); SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha* gamma*QuantumRange*node_info->total_color.red))); SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha* gamma*QuantumRange*node_info->total_color.green))); SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha* gamma*QuantumRange*node_info->total_color.blue))); if (node_info->number_unique > cube_info->transparent_pixels) { cube_info->transparent_pixels=node_info->number_unique; cube_info->transparent_index=(ssize_t) image->colors; } } } node_info->color_number=image->colors++; } return(image->colors); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y C u b e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyCubeInfo() deallocates memory associated with an image. % % The format of the DestroyCubeInfo method is: % % DestroyCubeInfo(CubeInfo *cube_info) % % A description of each parameter follows: % % o cube_info: the address of a structure of type CubeInfo. % */ static void DestroyCubeInfo(CubeInfo *cube_info) { register Nodes *nodes; /* Release color cube tree storage. */ do { nodes=cube_info->node_queue->next; cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory( cube_info->node_queue->nodes); cube_info->node_queue=(Nodes *) RelinquishMagickMemory( cube_info->node_queue); cube_info->node_queue=nodes; } while (cube_info->node_queue != (Nodes *) NULL); if (cube_info->memory_info != (MemoryInfo *) NULL) cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info); cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info); cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo % structure. 
% % The format of the DestroyQuantizeInfo method is: % % QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % */ MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(quantize_info != (QuantizeInfo *) NULL); assert(quantize_info->signature == MagickSignature); quantize_info->signature=(~MagickSignature); quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info); return(quantize_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D i t h e r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DitherImage() distributes the difference between an original image and % the corresponding color reduced algorithm to neighboring pixels using % serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns % MagickTrue if the image is dithered otherwise MagickFalse. % % The format of the DitherImage method is: % % MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % */ static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels) { register ssize_t i; assert(pixels != (DoublePixelPacket **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (DoublePixelPacket *) NULL) pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]); pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels); return(pixels); } static DoublePixelPacket **AcquirePixelThreadSet(const size_t count) { DoublePixelPacket **pixels; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads, sizeof(*pixels)); if (pixels == (DoublePixelPacket **) NULL) return((DoublePixelPacket **) NULL); (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels)); for (i=0; i < (ssize_t) number_threads; i++) { pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count, 2*sizeof(**pixels)); if (pixels[i] == (DoublePixelPacket *) NULL) return(DestroyPixelThreadSet(pixels)); } return(pixels); } static inline ssize_t CacheOffset(CubeInfo *cube_info, const DoublePixelPacket *pixel) { #define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift))) #define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift))) #define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift))) #define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift))) ssize_t offset; offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) | GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) | BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue)))); if (cube_info->associate_alpha != MagickFalse) offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->opacity))); return(offset); } static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info) { #define DitherImageTag "Dither/Image" CacheView *image_view; DoublePixelPacket **pixels; ExceptionInfo *exception; MagickBooleanType status; ssize_t y; /* Distribute quantization error using Floyd-Steinberg. 
*/ pixels=AcquirePixelThreadSet(image->columns); if (pixels == (DoublePixelPacket **) NULL) return(MagickFalse); exception=(&image->exception); status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); CubeInfo cube; DoublePixelPacket *current, *previous; register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; size_t index; ssize_t v; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); cube=(*cube_info); current=pixels[id]+(y & 0x01)*image->columns; previous=pixels[id]+((y+1) & 0x01)*image->columns; v=(ssize_t) ((y & 0x01) ? -1 : 1); for (x=0; x < (ssize_t) image->columns; x++) { DoublePixelPacket color, pixel; register ssize_t i; ssize_t u; u=(y & 0x01) ? (ssize_t) image->columns-1-x : x; AssociateAlphaPixel(&cube,q+u,&pixel); if (x > 0) { pixel.red+=7*current[u-v].red/16; pixel.green+=7*current[u-v].green/16; pixel.blue+=7*current[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=7*current[u-v].opacity/16; } if (y > 0) { if (x < (ssize_t) (image->columns-1)) { pixel.red+=previous[u+v].red/16; pixel.green+=previous[u+v].green/16; pixel.blue+=previous[u+v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=previous[u+v].opacity/16; } pixel.red+=5*previous[u].red/16; pixel.green+=5*previous[u].green/16; pixel.blue+=5*previous[u].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=5*previous[u].opacity/16; if (x > 0) { pixel.red+=3*previous[u-v].red/16; pixel.green+=3*previous[u-v].green/16; pixel.blue+=3*previous[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=3*previous[u-v].opacity/16; } } pixel.red=(MagickRealType) ClampPixel(pixel.red); pixel.green=(MagickRealType) ClampPixel(pixel.green); pixel.blue=(MagickRealType) ClampPixel(pixel.blue); if (cube.associate_alpha != MagickFalse) pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity); i=CacheOffset(&cube,&pixel); if (cube.cache[i] < 0) { register NodeInfo *node_info; register size_t id; /* Identify the deepest node containing the pixel's color. */ node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*(QuantumRange+ 1.0)+1.0); ClosestColor(image,&cube,node_info->parent); cube.cache[i]=(ssize_t) cube.color_number; } /* Assign pixel to closest colormap entry. */ index=(size_t) cube.cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(indexes+u,index); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRgb(q+u,image->colormap+index); if (cube.associate_alpha != MagickFalse) SetPixelOpacity(q+u,image->colormap[index].opacity); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; /* Store the error. 
*/ AssociateAlphaPixel(&cube,image->colormap+index,&color); current[u].red=pixel.red-color.red; current[u].green=pixel.green-color.green; current[u].blue=pixel.blue-color.blue; if (cube.associate_alpha != MagickFalse) current[u].opacity=pixel.opacity-color.opacity; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } image_view=DestroyCacheView(image_view); pixels=DestroyPixelThreadSet(pixels); return(MagickTrue); } static MagickBooleanType RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int); static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info, const size_t level,const unsigned int direction) { if (level == 1) switch (direction) { case WestGravity: { (void) RiemersmaDither(image,image_view,cube_info,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); break; } case EastGravity: { (void) RiemersmaDither(image,image_view,cube_info,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); break; } case NorthGravity: { (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); break; } case SouthGravity: { (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); break; } default: break; } else switch (direction) { case WestGravity: { Riemersma(image,image_view,cube_info,level-1,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); Riemersma(image,image_view,cube_info,level-1,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); Riemersma(image,image_view,cube_info,level-1,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); Riemersma(image,image_view,cube_info,level-1,SouthGravity); break; } case EastGravity: { Riemersma(image,image_view,cube_info,level-1,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); Riemersma(image,image_view,cube_info,level-1,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); Riemersma(image,image_view,cube_info,level-1,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); Riemersma(image,image_view,cube_info,level-1,NorthGravity); break; } case NorthGravity: { Riemersma(image,image_view,cube_info,level-1,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); Riemersma(image,image_view,cube_info,level-1,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); Riemersma(image,image_view,cube_info,level-1,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); Riemersma(image,image_view,cube_info,level-1,EastGravity); break; } case SouthGravity: { Riemersma(image,image_view,cube_info,level-1,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); Riemersma(image,image_view,cube_info,level-1,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); Riemersma(image,image_view,cube_info,level-1,SouthGravity); (void) 
RiemersmaDither(image,image_view,cube_info,SouthGravity); Riemersma(image,image_view,cube_info,level-1,WestGravity); break; } default: break; } } static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view, CubeInfo *cube_info,const unsigned int direction) { #define DitherImageTag "Dither/Image" DoublePixelPacket color, pixel; MagickBooleanType proceed; register CubeInfo *p; size_t index; p=cube_info; if ((p->x >= 0) && (p->x < (ssize_t) image->columns) && (p->y >= 0) && (p->y < (ssize_t) image->rows)) { ExceptionInfo *exception; register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t i; /* Distribute error. */ exception=(&image->exception); q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception); if (q == (PixelPacket *) NULL) return(MagickFalse); indexes=GetCacheViewAuthenticIndexQueue(image_view); AssociateAlphaPixel(cube_info,q,&pixel); for (i=0; i < ErrorQueueLength; i++) { pixel.red+=p->weights[i]*p->error[i].red; pixel.green+=p->weights[i]*p->error[i].green; pixel.blue+=p->weights[i]*p->error[i].blue; if (cube_info->associate_alpha != MagickFalse) pixel.opacity+=p->weights[i]*p->error[i].opacity; } pixel.red=(MagickRealType) ClampPixel(pixel.red); pixel.green=(MagickRealType) ClampPixel(pixel.green); pixel.blue=(MagickRealType) ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity); i=CacheOffset(cube_info,&pixel); if (p->cache[i] < 0) { register NodeInfo *node_info; register size_t id; /* Identify the deepest node containing the pixel's color. */ node_info=p->root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(cube_info,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ p->target=pixel; p->distance=(MagickRealType) (4.0*(QuantumRange+1.0)*((MagickRealType) QuantumRange+1.0)+1.0); ClosestColor(image,p,node_info->parent); p->cache[i]=(ssize_t) p->color_number; } /* Assign pixel to closest colormap entry. */ index=(size_t) (1*p->cache[i]); if (image->storage_class == PseudoClass) *indexes=(IndexPacket) index; if (cube_info->quantize_info->measure_error == MagickFalse) { SetPixelRgb(q,image->colormap+index); if (cube_info->associate_alpha != MagickFalse) SetPixelOpacity(q,image->colormap[index].opacity); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) return(MagickFalse); /* Propagate the error as the last entry of the error queue. 
*/ (void) CopyMagickMemory(p->error,p->error+1,(ErrorQueueLength-1)* sizeof(p->error[0])); AssociateAlphaPixel(cube_info,image->colormap+index,&color); p->error[ErrorQueueLength-1].red=pixel.red-color.red; p->error[ErrorQueueLength-1].green=pixel.green-color.green; p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue; if (cube_info->associate_alpha != MagickFalse) p->error[ErrorQueueLength-1].opacity=pixel.opacity-color.opacity; proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span); if (proceed == MagickFalse) return(MagickFalse); p->offset++; } switch (direction) { case WestGravity: p->x--; break; case EastGravity: p->x++; break; case NorthGravity: p->y--; break; case SouthGravity: p->y++; break; } return(MagickTrue); } static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info) { CacheView *image_view; MagickBooleanType status; register ssize_t i; size_t depth; if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod) return(FloydSteinbergDither(image,cube_info)); /* Distribute quantization error along a Hilbert curve. */ (void) ResetMagickMemory(cube_info->error,0,ErrorQueueLength* sizeof(*cube_info->error)); cube_info->x=0; cube_info->y=0; i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows); for (depth=1; i != 0; depth++) i>>=1; if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t) image->rows)) depth++; cube_info->offset=0; cube_info->span=(MagickSizeType) image->columns*image->rows; image_view=AcquireAuthenticCacheView(image,&image->exception); if (depth > 1) Riemersma(image,image_view,cube_info,depth-1,NorthGravity); status=RiemersmaDither(image,image_view,cube_info,ForgetGravity); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t C u b e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetCubeInfo() initialize the Cube data structure. % % The format of the GetCubeInfo method is: % % CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info, % const size_t depth,const size_t maximum_colors) % % A description of each parameter follows. % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o depth: Normally, this integer value is zero or one. A zero or % one tells Quantize to choose a optimal tree depth of Log4(number_colors). % A tree of this depth generally allows the best representation of the % reference image with the least amount of memory and the fastest % computational speed. In some cases, such as an image with low color % dispersion (a few number of colors), a value other than % Log4(number_colors) is required. To expand the color tree completely, % use a value of 8. % % o maximum_colors: maximum colors. % */ static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info, const size_t depth,const size_t maximum_colors) { CubeInfo *cube_info; MagickRealType sum, weight; register ssize_t i; size_t length; /* Initialize tree to describe color cube_info. */ cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info)); if (cube_info == (CubeInfo *) NULL) return((CubeInfo *) NULL); (void) ResetMagickMemory(cube_info,0,sizeof(*cube_info)); cube_info->depth=depth; if (cube_info->depth > MaxTreeDepth) cube_info->depth=MaxTreeDepth; if (cube_info->depth < 2) cube_info->depth=2; cube_info->maximum_colors=maximum_colors; /* Initialize root node. 
*/ cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL); if (cube_info->root == (NodeInfo *) NULL) return((CubeInfo *) NULL); cube_info->root->parent=cube_info->root; cube_info->quantize_info=CloneQuantizeInfo(quantize_info); if (cube_info->quantize_info->dither == MagickFalse) return(cube_info); /* Initialize dither resources. */ length=(size_t) (1UL << (4*(8-CacheShift))); cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache)); if (cube_info->memory_info == (MemoryInfo *) NULL) return((CubeInfo *) NULL); cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info); /* Initialize color cache. */ (void) ResetMagickMemory(cube_info->cache,(-1),sizeof(*cube_info->cache)* length); /* Distribute weights along a curve of exponential decay. */ weight=1.0; for (i=0; i < ErrorQueueLength; i++) { cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight); weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0)); } /* Normalize the weighting factors. */ weight=0.0; for (i=0; i < ErrorQueueLength; i++) weight+=cube_info->weights[i]; sum=0.0; for (i=0; i < ErrorQueueLength; i++) { cube_info->weights[i]/=weight; sum+=cube_info->weights[i]; } cube_info->weights[0]+=1.0-sum; return(cube_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t N o d e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNodeInfo() allocates memory for a new node in the color cube tree and % presets all fields to zero. % % The format of the GetNodeInfo method is: % % NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id, % const size_t level,NodeInfo *parent) % % A description of each parameter follows. % % o node: The GetNodeInfo method returns a pointer to a queue of nodes. % % o id: Specifies the child number of the node. % % o level: Specifies the level in the storage_class the node resides. % */ static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id, const size_t level,NodeInfo *parent) { NodeInfo *node_info; if (cube_info->free_nodes == 0) { Nodes *nodes; /* Allocate a new queue of nodes. */ nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes)); if (nodes == (Nodes *) NULL) return((NodeInfo *) NULL); nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList, sizeof(*nodes->nodes)); if (nodes->nodes == (NodeInfo *) NULL) return((NodeInfo *) NULL); nodes->next=cube_info->node_queue; cube_info->node_queue=nodes; cube_info->next_node=nodes->nodes; cube_info->free_nodes=NodesInAList; } cube_info->nodes++; cube_info->free_nodes--; node_info=cube_info->next_node++; (void) ResetMagickMemory(node_info,0,sizeof(*node_info)); node_info->parent=parent; node_info->id=id; node_info->level=level; return(node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e Q u a n t i z e E r r o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageQuantizeError() measures the difference between the original % and quantized images. This difference is the total quantization error. % The error is computed by summing over all pixels in an image the distance % squared in RGB space between each reference pixel value and its quantized % value. These values are computed: % % o mean_error_per_pixel: This value is the mean error for any single % pixel in the image. 
% % o normalized_mean_square_error: This value is the normalized mean % quantization error for any single pixel in the image. This distance % measure is normalized to a range between 0 and 1. It is independent % of the range of red, green, and blue values in the image. % % o normalized_maximum_square_error: Thsi value is the normalized % maximum quantization error for any single pixel in the image. This % distance measure is normalized to a range between 0 and 1. It is % independent of the range of red, green, and blue values in your image. % % The format of the GetImageQuantizeError method is: % % MagickBooleanType GetImageQuantizeError(Image *image) % % A description of each parameter follows. % % o image: the image. % */ MagickExport MagickBooleanType GetImageQuantizeError(Image *image) { CacheView *image_view; ExceptionInfo *exception; IndexPacket *indexes; MagickRealType alpha, area, beta, distance, maximum_error, mean_error, mean_error_per_pixel; size_t index; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); image->total_colors=GetNumberColors(image,(FILE *) NULL,&image->exception); (void) ResetMagickMemory(&image->error,0,sizeof(image->error)); if (image->storage_class == DirectClass) return(MagickTrue); alpha=1.0; beta=1.0; area=3.0*image->columns*image->rows; maximum_error=0.0; mean_error_per_pixel=0.0; mean_error=0.0; exception=(&image->exception); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { index=1UL*GetPixelIndex(indexes+x); if (image->matte != MagickFalse) { alpha=(MagickRealType) (QuantumScale*(GetPixelAlpha(p))); beta=(MagickRealType) (QuantumScale*(QuantumRange- image->colormap[index].opacity)); } distance=fabs((double) (alpha*GetPixelRed(p)-beta* image->colormap[index].red)); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; distance=fabs((double) (alpha*GetPixelGreen(p)-beta* image->colormap[index].green)); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; distance=fabs((double) (alpha*GetPixelBlue(p)-beta* image->colormap[index].blue)); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; p++; } } image_view=DestroyCacheView(image_view); image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area; image->error.normalized_mean_error=(double) QuantumScale*QuantumScale* mean_error/area; image->error.normalized_maximum_error=(double) QuantumScale*maximum_error; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetQuantizeInfo() initializes the QuantizeInfo structure. % % The format of the GetQuantizeInfo method is: % % GetQuantizeInfo(QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to a QuantizeInfo structure. 
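%
%  Typical usage (illustrative only; "image" stands for an existing Image
%  pointer, as in CompressImageColormap() above):
%
%    QuantizeInfo quantize_info;
%    GetQuantizeInfo(&quantize_info);
%    quantize_info.number_colors=16;
%    (void) QuantizeImage(&quantize_info,image);
%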
% */ MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(quantize_info != (QuantizeInfo *) NULL); (void) ResetMagickMemory(quantize_info,0,sizeof(*quantize_info)); quantize_info->number_colors=256; quantize_info->dither=MagickTrue; quantize_info->dither_method=RiemersmaDitherMethod; quantize_info->colorspace=UndefinedColorspace; quantize_info->measure_error=MagickFalse; quantize_info->signature=MagickSignature; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o s t e r i z e I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PosterizeImage() reduces the image to a limited number of colors for a % "poster" effect. % % The format of the PosterizeImage method is: % % MagickBooleanType PosterizeImage(Image *image,const size_t levels, % const MagickBooleanType dither) % MagickBooleanType PosterizeImageChannel(Image *image, % const ChannelType channel,const size_t levels, % const MagickBooleanType dither) % % A description of each parameter follows: % % o image: Specifies a pointer to an Image structure. % % o levels: Number of color levels allowed in each channel. Very low values % (2, 3, or 4) have the most visible effect. % % o dither: Set this integer value to something other than zero to dither % the mapped image. % */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels, const MagickBooleanType dither) { MagickBooleanType status; status=PosterizeImageChannel(image,DefaultChannels,levels,dither); return(status); } MagickExport MagickBooleanType PosterizeImageChannel(Image *image, const ChannelType channel,const size_t levels,const MagickBooleanType dither) { #define PosterizeImageTag "Posterize/Image" #define PosterizePixel(pixel) (Quantum) (QuantumRange*(MagickRound( \ QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1)) CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; QuantizeInfo *quantize_info; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,1,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { /* Posterize colormap. */ if ((channel & RedChannel) != 0) image->colormap[i].red=PosterizePixel(image->colormap[i].red); if ((channel & GreenChannel) != 0) image->colormap[i].green=PosterizePixel(image->colormap[i].green); if ((channel & BlueChannel) != 0) image->colormap[i].blue=PosterizePixel(image->colormap[i].blue); if ((channel & OpacityChannel) != 0) image->colormap[i].opacity=PosterizePixel(image->colormap[i].opacity); } /* Posterize image. 
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,PosterizePixel(GetPixelRed(q))); if ((channel & GreenChannel) != 0) SetPixelGreen(q,PosterizePixel(GetPixelGreen(q))); if ((channel & BlueChannel) != 0) SetPixelBlue(q,PosterizePixel(GetPixelBlue(q))); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelOpacity(q,PosterizePixel(GetPixelOpacity(q))); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,PosterizePixel(GetPixelIndex(indexes+x))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_PosterizeImageChannel) #endif proceed=SetImageProgress(image,PosterizeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL); quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels* levels,MaxColormapSize+1); quantize_info->dither=dither; quantize_info->tree_depth=MaxTreeDepth; status=QuantizeImage(quantize_info,image); quantize_info=DestroyQuantizeInfo(quantize_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e C h i l d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneChild() deletes the given node and merges its statistics into its % parent. % % The format of the PruneSubtree method is: % % PruneChild(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info) { NodeInfo *parent; register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneChild(cube_info,node_info->child[i]); /* Merge color statistics into parent. 
*/ parent=node_info->parent; parent->number_unique+=node_info->number_unique; parent->total_color.red+=node_info->total_color.red; parent->total_color.green+=node_info->total_color.green; parent->total_color.blue+=node_info->total_color.blue; parent->total_color.opacity+=node_info->total_color.opacity; parent->child[node_info->id]=(NodeInfo *) NULL; cube_info->nodes--; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e L e v e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneLevel() deletes all nodes at the bottom level of the color tree merging % their color statistics into their parent node. % % The format of the PruneLevel method is: % % PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneLevel(cube_info,node_info->child[i]); if (node_info->level == cube_info->depth) PruneChild(cube_info,node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e T o C u b e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneToCubeDepth() deletes any nodes at a depth greater than % cube_info->depth while merging their color statistics into their parent % node. % % The format of the PruneToCubeDepth method is: % % PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneToCubeDepth(cube_info,node_info->child[i]); if (node_info->level > cube_info->depth) PruneChild(cube_info,node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImage() analyzes the colors within a reference image and chooses a % fixed number of colors to represent the image. The goal of the algorithm % is to minimize the color difference between the input and output image while % minimizing the processing time. % % The format of the QuantizeImage method is: % % MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, % Image *image) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. 
% */ static MagickBooleanType DirectToColormapImage(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; register ssize_t i; size_t number_colors; ssize_t y; status=MagickTrue; number_colors=(size_t) (image->columns*image->rows); if (AcquireImageColormap(image,number_colors) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); if (image->colors != number_colors) return(MagickFalse); i=0; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType proceed; register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (const PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { image->colormap[i].red=GetPixelRed(q); image->colormap[i].green=GetPixelGreen(q); image->colormap[i].blue=GetPixelBlue(q); image->colormap[i].opacity=GetPixelOpacity(q); SetPixelIndex(indexes+x,i); i++; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) break; proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, Image *image) { CubeInfo *cube_info; MagickBooleanType status; size_t depth, maximum_colors; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; if (image->matte == MagickFalse) { if ((image->columns*image->rows) <= maximum_colors) (void) DirectToColormapImage(image,&image->exception); if (SetImageGray(image,&image->exception) != MagickFalse) (void) SetGrayscaleImage(image); } if ((image->storage_class == PseudoClass) && (image->colors <= maximum_colors)) { if ((quantize_info->colorspace != UndefinedColorspace) && (quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace(image,quantize_info->colorspace); return(MagickTrue); } depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if ((quantize_info->dither != MagickFalse) && (depth > 2)) depth--; if ((image->matte != MagickFalse) && (depth > 5)) depth--; if (SetImageGray(image,&image->exception) != MagickFalse) depth=MaxTreeDepth; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,image,&image->exception); if (status != MagickFalse) { /* Reduce the number of colors in the image if it contains more than the maximum, otherwise we can disable dithering to improve the performance. 
*/ if (cube_info->colors > cube_info->maximum_colors) ReduceImageColors(image,cube_info); else cube_info->quantize_info->dither_method=NoDitherMethod; status=AssignImageColors(image,cube_info); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImages() analyzes the colors within a set of reference images and % chooses a fixed number of colors to represent the set. The goal of the % algorithm is to minimize the color difference between the input and output % images while minimizing the processing time. % % The format of the QuantizeImages method is: % % MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, % Image *images) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: Specifies a pointer to a list of Image structures. % */ MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, Image *images) { CubeInfo *cube_info; Image *image; MagickBooleanType proceed, status; MagickProgressMonitor progress_monitor; register ssize_t i; size_t depth, maximum_colors, number_images; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickSignature); assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); if (GetNextImageInList(images) == (Image *) NULL) { /* Handle a single image with QuantizeImage. */ status=QuantizeImage(quantize_info,images); return(status); } status=MagickFalse; maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if (quantize_info->dither != MagickFalse) depth--; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) { (void) ThrowMagickException(&images->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return(MagickFalse); } number_images=GetImageListLength(images); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL, image->client_data); status=ClassifyImageColors(cube_info,image,&image->exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor,image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } if (status != MagickFalse) { /* Reduce the number of colors in an image sequence. 
*/ ReduceImageColors(images,cube_info); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,image->client_data); status=AssignImageColors(image,cube_info); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor, image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u a n t i z e E r r o r F l a t t e n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeErrorFlatten() traverses the color cube and flattens the quantization % error into a sorted 1D array. This accelerates the color reduction process. % % Contributed by Yoya. % % The format of the QuantizeErrorFlatten method is: % % size_t QuantizeErrorFlatten(const CubeInfo *cube_info, % const NodeInfo *node_info,const ssize_t offset, % MagickRealType *quantize_error) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is current pointer. % % o offset: quantize error offset. % % o quantize_error: the quantization error vector. % */ static size_t QuantizeErrorFlatten(const CubeInfo *cube_info, const NodeInfo *node_info,const ssize_t offset, MagickRealType *quantize_error) { register ssize_t i; size_t n, number_children; if (offset >= (ssize_t) cube_info->nodes) return(0); quantize_error[offset]=node_info->quantize_error; n=1; number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children ; i++) if (node_info->child[i] != (NodeInfo *) NULL) n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n, quantize_error); return(n); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Reduce() traverses the color cube tree and prunes any node whose % quantization error falls below a particular threshold. % % The format of the Reduce method is: % % Reduce(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) Reduce(cube_info,node_info->child[i]); if (node_info->quantize_error <= cube_info->pruning_threshold) PruneChild(cube_info,node_info); else { /* Find minimum pruning threshold. 
*/ if (node_info->number_unique > 0) cube_info->colors++; if (node_info->quantize_error < cube_info->next_threshold) cube_info->next_threshold=node_info->quantize_error; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReduceImageColors() repeatedly prunes the tree until the number of nodes % with n2 > 0 is less than or equal to the maximum number of colors allowed % in the output image. On any given iteration over the tree, it selects % those nodes whose E value is minimal for pruning and merges their % color statistics upward. It uses a pruning threshold, Ep, to govern % node selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors % within the cubic volume which the node represents. This includes n1 - % n2 pixels whose colors should be defined by nodes at a lower level in % the tree. % % The format of the ReduceImageColors method is: % % ReduceImageColors(const Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % */ static int MagickRealTypeCompare(const void *error_p,const void *error_q) { MagickRealType *p, *q; p=(MagickRealType *) error_p; q=(MagickRealType *) error_q; if (*p > *q) return(1); if (fabs((double) (*q-*p)) <= MagickEpsilon) return(0); return(-1); } static void ReduceImageColors(const Image *image,CubeInfo *cube_info) { #define ReduceImageTag "Reduce/Image" MagickBooleanType proceed; MagickOffsetType offset; size_t span; cube_info->next_threshold=0.0; if (cube_info->colors > cube_info->maximum_colors) { MagickRealType *quantize_error; /* Enable rapid reduction of the number of unique colors. 
*/ quantize_error=(MagickRealType *) AcquireQuantumMemory(cube_info->nodes, sizeof(*quantize_error)); if (quantize_error != (MagickRealType *) NULL) { (void) QuantizeErrorFlatten(cube_info,cube_info->root,0, quantize_error); qsort(quantize_error,cube_info->nodes,sizeof(MagickRealType), MagickRealTypeCompare); if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100)) cube_info->next_threshold=quantize_error[cube_info->nodes-110* (cube_info->maximum_colors+1)/100]; quantize_error=(MagickRealType *) RelinquishMagickMemory( quantize_error); } } for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; ) { cube_info->pruning_threshold=cube_info->next_threshold; cube_info->next_threshold=cube_info->root->quantize_error-1; cube_info->colors=0; Reduce(cube_info,cube_info->root); offset=(MagickOffsetType) span-cube_info->colors; proceed=SetImageProgress(image,ReduceImageTag,offset,span- cube_info->maximum_colors+1); if (proceed == MagickFalse) break; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImage() replaces the colors of an image with the closest color from % a reference image. % % The format of the RemapImage method is: % % MagickBooleanType RemapImage(const QuantizeInfo *quantize_info, % Image *image,const Image *remap_image) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. % % o remap_image: the reference image. % */ MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info, Image *image,const Image *remap_image) { CubeInfo *cube_info; MagickBooleanType status; /* Initialize color cube. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(remap_image != (Image *) NULL); assert(remap_image->signature == MagickSignature); cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,&image->exception); if (status != MagickFalse) { /* Classify image colors from the reference image. */ cube_info->quantize_info->number_colors=cube_info->colors; status=AssignImageColors(image,cube_info); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImages() replaces the colors of a sequence of images with the % closest color from a reference image. % % The format of the RemapImage method is: % % MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, % Image *images,Image *remap_image) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: the image sequence. % % o remap_image: the reference image. 
% */ MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, Image *images,const Image *remap_image) { CubeInfo *cube_info; Image *image; MagickBooleanType status; assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); image=images; if (remap_image == (Image *) NULL) { /* Create a global colormap for an image sequence. */ status=QuantizeImages(quantize_info,images); return(status); } /* Classify image colors from the reference image. */ cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,&image->exception); if (status != MagickFalse) { /* Classify image colors from the reference image. */ cube_info->quantize_info->number_colors=cube_info->colors; image=images; for ( ; image != (Image *) NULL; image=GetNextImageInList(image)) { status=AssignImageColors(image,cube_info); if (status == MagickFalse) break; } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t G r a y s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetGrayscaleImage() converts an image to a PseudoClass grayscale image. % % The format of the SetGrayscaleImage method is: % % MagickBooleanType SetGrayscaleImage(Image *image) % % A description of each parameter follows: % % o image: The image. % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { PixelPacket *color_1, *color_2; int intensity; color_1=(PixelPacket *) x; color_2=(PixelPacket *) y; intensity=PixelPacketIntensity(color_1)-(int) PixelPacketIntensity(color_2); return((int) intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static MagickBooleanType SetGrayscaleImage(Image *image) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; PixelPacket *colormap; register ssize_t i; ssize_t *colormap_index, j, y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->type != GrayscaleType) (void) TransformImageColorspace(image,GRAYColorspace); colormap_index=(ssize_t *) AcquireQuantumMemory(MaxColormapSize, sizeof(*colormap_index)); if (colormap_index == (ssize_t *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); if (image->storage_class != PseudoClass) { ExceptionInfo *exception; (void) ResetMagickMemory(colormap_index,(-1),MaxColormapSize* sizeof(*colormap_index)); if (AcquireImageColormap(image,MaxColormapSize) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); image->colors=0; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { 
status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { register size_t intensity; intensity=ScaleQuantumToMap(GetPixelRed(q)); if (colormap_index[intensity] < 0) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SetGrayscaleImage) #endif if (colormap_index[intensity] < 0) { colormap_index[intensity]=(ssize_t) image->colors; image->colormap[image->colors].red=GetPixelRed(q); image->colormap[image->colors].green=GetPixelGreen(q); image->colormap[image->colors].blue=GetPixelBlue(q); image->colors++; } } SetPixelIndex(indexes+x,colormap_index[intensity]); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); } for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].opacity=(unsigned short) i; qsort((void *) image->colormap,image->colors,sizeof(PixelPacket), IntensityCompare); colormap=(PixelPacket *) AcquireQuantumMemory(image->colors, sizeof(*colormap)); if (colormap == (PixelPacket *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); j=0; colormap[j]=image->colormap[0]; for (i=0; i < (ssize_t) image->colors; i++) { if (IsSameColor(image,&colormap[j],&image->colormap[i]) == MagickFalse) { j++; colormap[j]=image->colormap[i]; } colormap_index[(ssize_t) image->colormap[i].opacity]=j; } image->colors=(size_t) (j+1); image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap); image->colormap=colormap; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,colormap_index[ScaleQuantumToMap(GetPixelIndex( indexes+x))]); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); image->type=GrayscaleType; if (SetImageMonochrome(image,&image->exception) != MagickFalse) image->type=BilevelType; return(status); }
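/*
  A minimal standalone sketch, not ImageMagick code: it illustrates the
  level-rounding arithmetic behind the PosterizePixel() macro defined earlier
  in this file, i.e. scale a channel into [0,levels-1], round to the nearest
  level, then rescale to the full quantum range.  The names QUANTUM_RANGE,
  PosterizeChannel(), and the POSTERIZE_SKETCH_MAIN guard, plus the sample
  values, are assumptions made for this example only.
*/
#include <math.h>
#include <stdio.h>

#define QUANTUM_RANGE  65535.0

static double PosterizeChannel(const double value,const int levels)
{
  double
    step;

  step=levels > 1 ? (double) (levels-1) : 1.0;
  /* round to the nearest of `levels` evenly spaced quantum values */
  return(QUANTUM_RANGE*floor(value/QUANTUM_RANGE*step+0.5)/step);
}

#ifdef POSTERIZE_SKETCH_MAIN
int main(void)
{
  double
    samples[5] = { 0.0, 10000.0, 30000.0, 50000.0, 65535.0 };

  int
    i;

  /*
    Mirrors the parallel channel loop in PosterizeImageChannel() above.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
  for (i=0; i < 5; i++)
    samples[i]=PosterizeChannel(samples[i],4);
  for (i=0; i < 5; i++)
    (void) printf("%g\n",samples[i]);
  return(0);
}
#endif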
reduction.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <omp.h> #include "timer.h" /* Code copied from PDF */ double sum(double* vec, int len) { int i; double accu = 0.0; #pragma omp parallel for reduction(+:accu) for (i = 0; i < len; i++) { accu = accu + vec[i]; } return accu; } double reduce(double (fun)(double, double), double* vec, int len, double neutral) { int i; double accu = neutral; #pragma omp parallel for for (i = 0; i < len; i++) { #pragma omp critical accu = fun(accu, vec[i]); } return accu; } double reduce2(double (fun)(double, double), double* vec, int len, double neutral) { /* We need a copy of the original head of the vector, as well as a vector to work in */ double* start_vec = vec; double* calc_vec = NULL; /* We calculate the largest power of two (n) that will fit in len */ int calc_vec_len_copy; int vec_len = len; int calc_vec_len = (int)pow(2, floor(log2(vec_len))); double accu = neutral; /* Continue until the whole original vector has been reduced */ while (vec_len != 0) { /* Remove the current working vector from the original vector. */ vec_len -= calc_vec_len; /* Save the length of the the current working vector to continue * with the leftovers of the original vector. */ calc_vec_len_copy = calc_vec_len; /* While the current working vector has not yet been reduced to zero, * keep merging sets of 2 with the function given. */ while (calc_vec_len != 1) { /* New vector for the results, automatically remove the old. */ calc_vec = malloc((calc_vec_len / 2) * sizeof(double)); /* The merging of sets using the function can be done parallel * Because there are no race conditions in this loop. */ #pragma omp parallel for for (int i = 0; i < calc_vec_len; i+=2) calc_vec[i/2] = fun(vec[i], vec[i+1]); /* Prepare for next step */ vec = calc_vec; calc_vec_len = calc_vec_len / 2; } /* Working vector has been reduced to a single term. Reduce with * the previously done parts. 
*/
        accu = fun(accu, vec[0]);

        /* Shift the original vector beyond all the done indices */
        start_vec += calc_vec_len_copy;
        vec = start_vec;

        /* Calculate the length of the new working vector */
        calc_vec_len = (int)pow(2, floor(log2(vec_len)));
    }

    return accu;
}

double fun(double a, double b) {
    return (a+a+b+b);
}

int main(int argc, char *argv[]) {
    int num_threads;
    int vec_size;
    int custom_reduce_method;
    double accu;
    double time;
    double *vec;

    if (argc < 4) {
        printf("Usage: %s num_threads vec_size [reduce_method]\n", argv[0]);
        printf(" - num_threads: number of threads to use for simulation, "
               "should be >=1\n");
        printf(" - vec_size: exponent n; the vector holds 10^n elements, "
               "which should be >=4\n");
        printf(" - [reduce_method]: custom | sequential.\n");
        return EXIT_FAILURE;
    }

    num_threads = atoi(argv[1]);
    vec_size = pow(10, atoi(argv[2]));

    if (num_threads < 1) {
        printf("argument error: num_threads should be >=1.\n");
        return EXIT_FAILURE;
    }
    if (vec_size < 4) {
        printf("argument error: vec_size should be >=4.\n");
        return EXIT_FAILURE;
    }

    if (strcmp(argv[3], "sequential") == 0) {
        custom_reduce_method = 0;
    } else {
        custom_reduce_method = 1;
    }

    vec = (double*)malloc(vec_size * sizeof(double));

    /* Fill the vector with sine values */
    #pragma omp parallel for
    for (int i = 0; i < vec_size; i++) {
        vec[i] = sin(i);
    }

    omp_set_num_threads(num_threads);

    /* Start timing the efficiency of OpenMP */
    timer_start();

    /* Calculate a reduced vector with OpenMP */
    if (custom_reduce_method) {
        accu = reduce2(fun, vec, vec_size, 0);
        /* accu = sum(vec, vec_size); */
    } else {
        accu = reduce(fun, vec, vec_size, 0);
    }

    /* Stop timing */
    time = timer_end();

    printf("%d, %d, %g\n", num_threads, vec_size, time);

    free(vec);
    vec = NULL;

    return EXIT_SUCCESS;
}
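/*
  Hedged sketch, not part of the assignment code above: since OpenMP 4.0 a
  user-declared reduction can often replace the critical section used in
  reduce() when the combining function is associative.  The names combine()
  and reduce_declared() are introduced here for illustration only; note that
  fun() above (a+a+b+b) is not associative, so this shows an alternative
  idiom rather than a drop-in replacement with identical results.
*/
static double combine(double a, double b) {
    return a + b;
}

#pragma omp declare reduction(comb : double : omp_out = combine(omp_out, omp_in)) initializer(omp_priv = 0.0)

static double reduce_declared(double* vec, int len) {
    int i;
    double accu = 0.0;

    /* Each thread accumulates a private partial result; OpenMP merges the
     * partials with the declared combiner, so no critical section is needed. */
    #pragma omp parallel for reduction(comb:accu)
    for (i = 0; i < len; i++) {
        accu = combine(accu, vec[i]);
    }
    return accu;
}
/* Example use (same call shape as reduce/reduce2): accu = reduce_declared(vec, vec_size); */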
GB_unop__isfinite_bool_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__isfinite_bool_fc64 // op(A') function: GB_unop_tran__isfinite_bool_fc64 // C type: bool // A type: GxB_FC64_t // cast: GxB_FC64_t cij = (aij) // unaryop: cij = GB_cisfinite (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_cisfinite (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = (aij) ; \ Cx [pC] = GB_cisfinite (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__isfinite_bool_fc64 ( bool *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = (aij) ; Cx [p] = GB_cisfinite (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = (aij) ; Cx [p] = GB_cisfinite (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__isfinite_bool_fc64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
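//------------------------------------------------------------------------------
// A minimal standalone sketch (not generated GraphBLAS code): the same
// dense-versus-bitmap apply pattern used by GB_unop_apply__isfinite_bool_fc64
// above, reduced to real doubles and the C99 isfinite() predicate.  The name
// apply_isfinite_double and its argument list are assumptions made for
// illustration only.
//------------------------------------------------------------------------------

#include <math.h>
#include <stdbool.h>
#include <stdint.h>

static void apply_isfinite_double
(
    bool *Cx,                   // output: Cx [p] = isfinite (Ax [p])
    const double *Ax,           // input values
    const int8_t *Ab,           // bitmap of present entries, or NULL if dense
    int64_t anz,                // number of entries
    int nthreads                // number of OpenMP threads to use
)
{
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = isfinite (Ax [p]) ;
        }
    }
    else
    {
        // bitmap case: only entries with Ab [p] != 0 are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            Cx [p] = isfinite (Ax [p]) ;
        }
    }
}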
quantize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE % % Q Q U U A A NN N T I ZZ E % % Q Q U U AAAAA N N N T I ZZZ EEEEE % % Q QQ U U A A N NN T I ZZ E % % QQQQ UUU A A N N T IIIII ZZZZZ EEEEE % % % % % % MagickCore Methods to Reduce the Number of Unique Colors in an Image % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Realism in computer graphics typically requires using 24 bits/pixel to % generate an image. Yet many graphic display devices do not contain the % amount of memory necessary to match the spatial and color resolution of % the human eye. The Quantize methods takes a 24 bit image and reduces % the number of colors so it can be displayed on raster device with less % bits per pixel. In most instances, the quantized image closely % resembles the original reference image. % % A reduction of colors in an image is also desirable for image % transmission and real-time animation. % % QuantizeImage() takes a standard RGB or monochrome images and quantizes % them down to some fixed number of colors. % % For purposes of color allocation, an image is a set of n pixels, where % each pixel is a point in RGB space. RGB space is a 3-dimensional % vector space, and each pixel, Pi, is defined by an ordered triple of % red, green, and blue coordinates, (Ri, Gi, Bi). % % Each primary color component (red, green, or blue) represents an % intensity which varies linearly from 0 to a maximum value, Cmax, which % corresponds to full saturation of that color. Color allocation is % defined over a domain consisting of the cube in RGB space with opposite % vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax = % 255. % % The algorithm maps this domain onto a tree in which each node % represents a cube within that domain. In the following discussion % these cubes are defined by the coordinate of two opposite vertices (vertex % nearest the origin in RGB space and the vertex farthest from the origin). % % The tree's root node represents the entire domain, (0,0,0) through % (Cmax,Cmax,Cmax). Each lower level in the tree is generated by % subdividing one node's cube into eight smaller cubes of equal size. % This corresponds to bisecting the parent cube with planes passing % through the midpoints of each edge. % % The basic algorithm operates in three phases: Classification, % Reduction, and Assignment. Classification builds a color description % tree for the image. Reduction collapses the tree until the number it % represents, at most, the number of colors desired in the output image. % Assignment defines the output image's color map and sets each pixel's % color by restorage_class in the reduced tree. 
Our goal is to minimize % the numerical discrepancies between the original colors and quantized % colors (quantization error). % % Classification begins by initializing a color description tree of % sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color description % tree in the storage_class phase for realistic values of Cmax. If % colors components in the input image are quantized to k-bit precision, % so that Cmax= 2k-1, the tree would need k levels below the root node to % allow representing each possible input color in a leaf. This becomes % prohibitive because the tree's total number of nodes is 1 + % sum(i=1, k, 8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing the pixel's color. It updates the following data for each % such node: % % n1: Number of pixels whose color is contained in the RGB cube which % this node represents; % % n2: Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb: Sums of the red, green, and blue component values for all % pixels not classified at a lower depth. The combination of these sums % and n2 will ultimately characterize the mean color of a set of pixels % represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the % quantization error for a node. % % Reduction repeatedly prunes the tree until the number of nodes with n2 % > 0 is less than or equal to the maximum number of colors allowed in % the output image. On any given iteration over the tree, it selects % those nodes whose E count is minimal for pruning and merges their color % statistics upward. It uses a pruning threshold, Ep, to govern node % selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors within % the cubic volume which the node represents. 
This includes n1 - n2 % pixels whose colors should be defined by nodes at a lower level in the % tree. % % Assignment generates the output image from the pruned tree. The output % image consists of two parts: (1) A color map, which is an array of % color descriptions (RGB triples) for each color present in the output % image; (2) A pixel array, which represents each pixel as an index % into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. % % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % This method is based on a similar algorithm written by Paul Raveling. % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/compare.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/histogram.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" /* Define declarations. */ #if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE) #define CacheShift 2 #else #define CacheShift 3 #endif #define ErrorQueueLength 16 #define ErrorRelativeWeight PerceptibleReciprocal(16) #define MaxNodes 266817 #define MaxTreeDepth 8 #define NodesInAList 1920 /* Typdef declarations. */ typedef struct _DoublePixelPacket { double red, green, blue, alpha; } DoublePixelPacket; typedef struct _NodeInfo { struct _NodeInfo *parent, *child[16]; MagickSizeType number_unique; DoublePixelPacket total_color; double quantize_error; size_t color_number, id, level; } NodeInfo; typedef struct _Nodes { NodeInfo *nodes; struct _Nodes *next; } Nodes; typedef struct _CubeInfo { NodeInfo *root; size_t colors, maximum_colors; ssize_t transparent_index; MagickSizeType transparent_pixels; DoublePixelPacket target; double distance, pruning_threshold, next_threshold; size_t nodes, free_nodes, color_number; NodeInfo *next_node; Nodes *node_queue; MemoryInfo *memory_info; ssize_t *cache; DoublePixelPacket error[ErrorQueueLength]; double diffusion, weights[ErrorQueueLength]; QuantizeInfo *quantize_info; MagickBooleanType associate_alpha; ssize_t x, y; size_t depth; MagickOffsetType offset; MagickSizeType span; } CubeInfo; /* Method prototypes. 
*/ static CubeInfo *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t); static NodeInfo *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *); static MagickBooleanType AssignImageColors(Image *,CubeInfo *,ExceptionInfo *), ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *), DitherImage(Image *,CubeInfo *,ExceptionInfo *), SetGrayscaleImage(Image *,ExceptionInfo *), SetImageColormap(Image *,CubeInfo *,ExceptionInfo *); static void ClosestColor(const Image *,CubeInfo *,const NodeInfo *), DefineImageColormap(Image *,CubeInfo *,NodeInfo *), DestroyCubeInfo(CubeInfo *), PruneLevel(CubeInfo *,const NodeInfo *), PruneToCubeDepth(CubeInfo *,const NodeInfo *), ReduceImageColors(const Image *,CubeInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireQuantizeInfo() allocates the QuantizeInfo structure. % % The format of the AcquireQuantizeInfo method is: % % QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info) { QuantizeInfo *quantize_info; quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info)); GetQuantizeInfo(quantize_info); if (image_info != (ImageInfo *) NULL) { const char *option; quantize_info->dither_method=image_info->dither == MagickFalse ? NoDitherMethod : RiemersmaDitherMethod; option=GetImageOption(image_info,"dither"); if (option != (const char *) NULL) quantize_info->dither_method=(DitherMethod) ParseCommandOption( MagickDitherOptions,MagickFalse,option); quantize_info->measure_error=image_info->verbose; } return(quantize_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A s s i g n I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AssignImageColors() generates the output image from the pruned tree. The % output image consists of two parts: (1) A color map, which is an array % of color descriptions (RGB triples) for each color present in the % output image; (2) A pixel array, which represents each pixel as an % index into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. % % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % The format of the AssignImageColors() method is: % % MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. 
% */ static inline void AssociateAlphaPixel(const Image *image, const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel) { double alpha; if ((cube_info->associate_alpha == MagickFalse) || (GetPixelAlpha(image,pixel) == OpaqueAlpha)) { alpha_pixel->red=(double) GetPixelRed(image,pixel); alpha_pixel->green=(double) GetPixelGreen(image,pixel); alpha_pixel->blue=(double) GetPixelBlue(image,pixel); alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel); return; } alpha=(double) (QuantumScale*GetPixelAlpha(image,pixel)); alpha_pixel->red=alpha*GetPixelRed(image,pixel); alpha_pixel->green=alpha*GetPixelGreen(image,pixel); alpha_pixel->blue=alpha*GetPixelBlue(image,pixel); alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel); } static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info, const PixelInfo *pixel,DoublePixelPacket *alpha_pixel) { double alpha; if ((cube_info->associate_alpha == MagickFalse) || (pixel->alpha == OpaqueAlpha)) { alpha_pixel->red=(double) pixel->red; alpha_pixel->green=(double) pixel->green; alpha_pixel->blue=(double) pixel->blue; alpha_pixel->alpha=(double) pixel->alpha; return; } alpha=(double) (QuantumScale*pixel->alpha); alpha_pixel->red=alpha*pixel->red; alpha_pixel->green=alpha*pixel->green; alpha_pixel->blue=alpha*pixel->blue; alpha_pixel->alpha=(double) pixel->alpha; } static inline size_t ColorToNodeId(const CubeInfo *cube_info, const DoublePixelPacket *pixel,size_t index) { size_t id; id=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01) | ((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) & 0x01) << 1 | ((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) & 0x01) << 2); if (cube_info->associate_alpha != MagickFalse) id|=((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) & 0x1) << 3; return(id); } static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { #define AssignImageTag "Assign/Image" ColorspaceType colorspace; ssize_t y; /* Allocate image colormap. */ colorspace=image->colorspace; if (cube_info->quantize_info->colorspace != UndefinedColorspace) (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace, exception); cube_info->transparent_pixels=0; cube_info->transparent_index=(-1); if (SetImageColormap(image,cube_info,exception) == MagickFalse) return(MagickFalse); /* Create a reduced color image. */ if (cube_info->quantize_info->dither_method != NoDitherMethod) (void) DitherImage(image,cube_info,exception); else { CacheView *image_view; MagickBooleanType status; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { CubeInfo cube; Quantum *magick_restrict q; ssize_t count, x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } cube=(*cube_info); for (x=0; x < (ssize_t) image->columns; x+=count) { DoublePixelPacket pixel; const NodeInfo *node_info; ssize_t i; size_t id, index; /* Identify the deepest node containing the pixel's color. 
*/ for (count=1; (x+count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,q,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,&cube,q,&pixel); node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+ 1.0); ClosestColor(image,&cube,node_info->parent); index=cube.color_number; for (i=0; i < (ssize_t) count; i++) { if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum( image->colormap[index].red),q); SetPixelGreen(image,ClampToQuantum( image->colormap[index].green),q); SetPixelBlue(image,ClampToQuantum( image->colormap[index].blue),q); if (cube.associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum( image->colormap[index].alpha),q); } q+=GetPixelChannels(image); } } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); } if (cube_info->quantize_info->measure_error != MagickFalse) (void) GetImageQuantizeError(image,exception); if ((cube_info->quantize_info->number_colors == 2) && (IsGrayColorspace(cube_info->quantize_info->colorspace))) { double intensity; /* Monochrome image. */ intensity=GetPixelInfoLuma(image->colormap+0) < QuantumRange/2.0 ? 0.0 : QuantumRange; if (image->colors > 1) { intensity=0.0; if (GetPixelInfoLuma(image->colormap+0) > GetPixelInfoLuma(image->colormap+1)) intensity=(double) QuantumRange; } image->colormap[0].red=intensity; image->colormap[0].green=intensity; image->colormap[0].blue=intensity; if (image->colors > 1) { image->colormap[1].red=(double) QuantumRange-intensity; image->colormap[1].green=(double) QuantumRange-intensity; image->colormap[1].blue=(double) QuantumRange-intensity; } } (void) SyncImage(image,exception); if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (IssRGBCompatibleColorspace(colorspace) == MagickFalse)) (void) TransformImageColorspace(image,colorspace,exception); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l a s s i f y I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClassifyImageColors() begins by initializing a color description tree % of sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color % description tree in the storage_class phase for realistic values of % Cmax. If colors components in the input image are quantized to k-bit % precision, so that Cmax= 2k-1, the tree would need k levels below the % root node to allow representing each possible input color in a leaf. % This becomes prohibitive because the tree's total number of nodes is % 1 + sum(i=1,k,8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. 
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing It updates the following data for each such node: % % n1 : Number of pixels whose color is contained in the RGB cube % which this node represents; % % n2 : Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb : Sums of the red, green, and blue component values for % all pixels not classified at a lower depth. The combination of % these sums and n2 will ultimately characterize the mean color of a % set of pixels represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the quantization % error for a node. % % The format of the ClassifyImageColors() method is: % % MagickBooleanType ClassifyImageColors(CubeInfo *cube_info, % const Image *image,ExceptionInfo *exception) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o image: the image. % */ static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info) { MagickBooleanType associate_alpha; associate_alpha=image->alpha_trait != UndefinedPixelTrait ? MagickTrue : MagickFalse; if ((cube_info->quantize_info->number_colors == 2) && ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) || (cube_info->quantize_info->colorspace == GRAYColorspace))) associate_alpha=MagickFalse; cube_info->associate_alpha=associate_alpha; } static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info, const Image *image,ExceptionInfo *exception) { #define ClassifyImageTag "Classify/Image" CacheView *image_view; double bisect; DoublePixelPacket error, mid, midpoint, pixel; MagickBooleanType proceed; NodeInfo *node_info; size_t count, id, index, level; ssize_t y; /* Classify the first cube_info->maximum_colors colors to a tree depth of 8. */ SetAssociatedAlpha(image,cube_info); if (cube_info->quantize_info->colorspace != image->colorspace) { if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image, cube_info->quantize_info->colorspace,exception); else if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) TransformImageColorspace((Image *) image,sRGBColorspace, exception); } midpoint.red=(double) QuantumRange/2.0; midpoint.green=(double) QuantumRange/2.0; midpoint.blue=(double) QuantumRange/2.0; midpoint.alpha=(double) QuantumRange/2.0; error.alpha=0.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; if (cube_info->nodes > MaxNodes) { /* Prune one level if the color tree is too large. */ PruneLevel(cube_info,cube_info->root); cube_info->depth--; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count) { /* Start at the root and descend the color cube tree. 
*/ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,p,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((double) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= MaxTreeDepth; level++) { double distance; bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.alpha+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. */ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); continue; } if (level == MaxTreeDepth) cube_info->colors++; } /* Approximate the quantization error represented by this node. */ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.alpha=QuantumScale*(pixel.alpha-mid.alpha); distance=(double) (error.red*error.red+error.green*error.green+ error.blue*error.blue+error.alpha*error.alpha); if (IsNaN(distance) != 0) distance=0.0; node_info->quantize_error+=count*sqrt(distance); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. */ node_info->number_unique+=count; node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red); node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green); node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) node_info->total_color.alpha+=count*QuantumScale* ClampPixel(pixel.alpha); else node_info->total_color.alpha+=count*QuantumScale* ClampPixel((MagickRealType) OpaqueAlpha); p+=count*GetPixelChannels(image); } if (cube_info->colors > cube_info->maximum_colors) { PruneToCubeDepth(cube_info,cube_info->root); break; } proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } for (y++; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; if (cube_info->nodes > MaxNodes) { /* Prune one level if the color tree is too large. */ PruneLevel(cube_info,cube_info->root); cube_info->depth--; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count) { /* Start at the root and descend the color cube tree. */ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,p,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((double) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= cube_info->depth; level++) { double distance; bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? 
bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.alpha+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. */ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","%s", image->filename); continue; } if (level == cube_info->depth) cube_info->colors++; } /* Approximate the quantization error represented by this node. */ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.alpha=QuantumScale*(pixel.alpha-mid.alpha); distance=(double) (error.red*error.red+error.green*error.green+ error.blue*error.blue+error.alpha*error.alpha); if (IsNaN(distance) != 0) distance=0.0; node_info->quantize_error+=count*sqrt(distance); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. */ node_info->number_unique+=count; node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red); node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green); node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) node_info->total_color.alpha+=count*QuantumScale* ClampPixel(pixel.alpha); else node_info->total_color.alpha+=count*QuantumScale* ClampPixel((MagickRealType) OpaqueAlpha); p+=count*GetPixelChannels(image); } proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } image_view=DestroyCacheView(image_view); if (cube_info->quantize_info->colorspace != image->colorspace) if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception); return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneQuantizeInfo() makes a duplicate of the given quantize info structure, % or if quantize info is NULL, a new one. % % The format of the CloneQuantizeInfo method is: % % QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o clone_info: Method CloneQuantizeInfo returns a duplicate of the given % quantize info, or if image info is NULL a new one. % % o quantize_info: a structure of type info. 
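%
%  A minimal usage sketch (illustrative only; it assumes quantize_info, image,
%  and exception already exist elsewhere):
%
%    QuantizeInfo
%      *clone_info;
%
%    clone_info=CloneQuantizeInfo(quantize_info);
%    clone_info->number_colors=16;
%    (void) QuantizeImage(clone_info,image,exception);
%    clone_info=DestroyQuantizeInfo(clone_info);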
% */ MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info) { QuantizeInfo *clone_info; clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info)); GetQuantizeInfo(clone_info); if (quantize_info == (QuantizeInfo *) NULL) return(clone_info); clone_info->number_colors=quantize_info->number_colors; clone_info->tree_depth=quantize_info->tree_depth; clone_info->dither_method=quantize_info->dither_method; clone_info->colorspace=quantize_info->colorspace; clone_info->measure_error=quantize_info->measure_error; return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o s e s t C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClosestColor() traverses the color cube tree at a particular node and % determines which colormap entry best represents the input color. % % The format of the ClosestColor method is: % % void ClosestColor(const Image *image,CubeInfo *cube_info, % const NodeInfo *node_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % % o node_info: the address of a structure of type NodeInfo which points to a % node in the color cube tree that is to be pruned. % */ static void ClosestColor(const Image *image,CubeInfo *cube_info, const NodeInfo *node_info) { size_t number_children; ssize_t i; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) ClosestColor(image,cube_info,node_info->child[i]); if (node_info->number_unique != 0) { double alpha, beta, distance, pixel; DoublePixelPacket *magick_restrict q; PixelInfo *magick_restrict p; /* Determine if this color is "closest". */ p=image->colormap+node_info->color_number; q=(&cube_info->target); alpha=1.0; beta=1.0; if (cube_info->associate_alpha != MagickFalse) { alpha=(MagickRealType) (QuantumScale*p->alpha); beta=(MagickRealType) (QuantumScale*q->alpha); } pixel=alpha*p->red-beta*q->red; distance=pixel*pixel; if (distance <= cube_info->distance) { pixel=alpha*p->green-beta*q->green; distance+=pixel*pixel; if (distance <= cube_info->distance) { pixel=alpha*p->blue-beta*q->blue; distance+=pixel*pixel; if (distance <= cube_info->distance) { if (cube_info->associate_alpha != MagickFalse) { pixel=p->alpha-q->alpha; distance+=pixel*pixel; } if (distance <= cube_info->distance) { cube_info->distance=distance; cube_info->color_number=node_info->color_number; } } } } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p r e s s I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompressImageColormap() compresses an image colormap by removing any % duplicate or unused color entries. % % The format of the CompressImageColormap method is: % % MagickBooleanType CompressImageColormap(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
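%
%  A hedged usage sketch (assumes image is a PseudoClass image acquired
%  elsewhere; the call returns MagickFalse without modifying non-palette
%  images):
%
%    if (CompressImageColormap(image,exception) != MagickFalse)
%      (void) FormatLocaleFile(stdout,"compressed to %.20g colors\n",
%        (double) image->colors);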
% */ MagickExport MagickBooleanType CompressImageColormap(Image *image, ExceptionInfo *exception) { QuantizeInfo quantize_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (IsPaletteImage(image) == MagickFalse) return(MagickFalse); GetQuantizeInfo(&quantize_info); quantize_info.number_colors=image->colors; quantize_info.tree_depth=MaxTreeDepth; return(QuantizeImage(&quantize_info,image,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e f i n e I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DefineImageColormap() traverses the color cube tree and notes each colormap % entry. A colormap entry is any node in the color cube tree where the % of unique colors is not zero. % % The format of the DefineImageColormap method is: % % void DefineImageColormap(Image *image,CubeInfo *cube_info, % NodeInfo *node_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % % o node_info: the address of a structure of type NodeInfo which points to a % node in the color cube tree that is to be pruned. % */ static void DefineImageColormap(Image *image,CubeInfo *cube_info, NodeInfo *node_info) { size_t number_children; ssize_t i; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) DefineImageColormap(image,cube_info,node_info->child[i]); if (node_info->number_unique != 0) { double alpha; PixelInfo *magick_restrict q; /* Colormap entry is defined by the mean color in this cube. 
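        When alpha is associated, the accumulated sums are alpha-premultiplied,
        so the mean color is divided by the mean alpha (gamma below) to store
        straight (un-premultiplied) colormap values.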
*/ q=image->colormap+image->colors; alpha=(double) ((MagickOffsetType) node_info->number_unique); alpha=PerceptibleReciprocal(alpha); if (cube_info->associate_alpha == MagickFalse) { q->red=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.red); q->green=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.green); q->blue=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.blue); q->alpha=(double) OpaqueAlpha; } else { double opacity; opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha); q->alpha=(double) ClampToQuantum(opacity); if (q->alpha == OpaqueAlpha) { q->red=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.red); q->green=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.green); q->blue=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.blue); } else { double gamma; gamma=(double) (QuantumScale*q->alpha); gamma=PerceptibleReciprocal(gamma); q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange* node_info->total_color.red); q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange* node_info->total_color.green); q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange* node_info->total_color.blue); if (node_info->number_unique > cube_info->transparent_pixels) { cube_info->transparent_pixels=node_info->number_unique; cube_info->transparent_index=(ssize_t) image->colors; } } } node_info->color_number=image->colors++; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y C u b e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyCubeInfo() deallocates memory associated with an image. % % The format of the DestroyCubeInfo method is: % % DestroyCubeInfo(CubeInfo *cube_info) % % A description of each parameter follows: % % o cube_info: the address of a structure of type CubeInfo. % */ static void DestroyCubeInfo(CubeInfo *cube_info) { Nodes *nodes; /* Release color cube tree storage. */ do { nodes=cube_info->node_queue->next; cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory( cube_info->node_queue->nodes); cube_info->node_queue=(Nodes *) RelinquishMagickMemory( cube_info->node_queue); cube_info->node_queue=nodes; } while (cube_info->node_queue != (Nodes *) NULL); if (cube_info->memory_info != (MemoryInfo *) NULL) cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info); cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info); cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo % structure. % % The format of the DestroyQuantizeInfo method is: % % QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. 
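%
%  DestroyQuantizeInfo() returns a NULL pointer so the caller can clear its
%  reference in one statement, e.g. (sketch):
%
%    quantize_info=DestroyQuantizeInfo(quantize_info);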
% */ MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(quantize_info != (QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); quantize_info->signature=(~MagickCoreSignature); quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info); return(quantize_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D i t h e r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DitherImage() distributes the difference between an original image and % the corresponding color reduced algorithm to neighboring pixels using % serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns % MagickTrue if the image is dithered otherwise MagickFalse. % % The format of the DitherImage method is: % % MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % % o exception: return any errors or warnings in this structure. % */ static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels) { ssize_t i; assert(pixels != (DoublePixelPacket **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (DoublePixelPacket *) NULL) pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]); pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels); return(pixels); } static DoublePixelPacket **AcquirePixelThreadSet(const size_t count) { DoublePixelPacket **pixels; size_t number_threads; ssize_t i; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads, sizeof(*pixels)); if (pixels == (DoublePixelPacket **) NULL) return((DoublePixelPacket **) NULL); (void) memset(pixels,0,number_threads*sizeof(*pixels)); for (i=0; i < (ssize_t) number_threads; i++) { pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2* sizeof(**pixels)); if (pixels[i] == (DoublePixelPacket *) NULL) return(DestroyPixelThreadSet(pixels)); } return(pixels); } static inline ssize_t CacheOffset(CubeInfo *cube_info, const DoublePixelPacket *pixel) { #define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift))) #define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift))) #define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift))) #define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift))) ssize_t offset; offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) | GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) | BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue)))); if (cube_info->associate_alpha != MagickFalse) offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha))); return(offset); } static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; DoublePixelPacket **pixels; MagickBooleanType status; ssize_t y; /* Distribute quantization error using Floyd-Steinberg. 
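    The scan is serpentine: even rows are processed left-to-right and odd rows
    right-to-left, and each pixel gathers the diffused error of its already
    processed neighbors with the classic 7/16, 5/16, 3/16, 1/16 weights,
    scaled by cube_info->diffusion.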
*/ pixels=AcquirePixelThreadSet(image->columns); if (pixels == (DoublePixelPacket **) NULL) return(MagickFalse); status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); CubeInfo cube; DoublePixelPacket *current, *previous; Quantum *magick_restrict q; size_t index; ssize_t x, v; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } cube=(*cube_info); current=pixels[id]+(y & 0x01)*image->columns; previous=pixels[id]+((y+1) & 0x01)*image->columns; v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1); for (x=0; x < (ssize_t) image->columns; x++) { DoublePixelPacket color, pixel; ssize_t i; ssize_t u; u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x; AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel); if (x > 0) { pixel.red+=7.0*cube_info->diffusion*current[u-v].red/16; pixel.green+=7.0*cube_info->diffusion*current[u-v].green/16; pixel.blue+=7.0*cube_info->diffusion*current[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=7.0*cube_info->diffusion*current[u-v].alpha/16; } if (y > 0) { if (x < (ssize_t) (image->columns-1)) { pixel.red+=cube_info->diffusion*previous[u+v].red/16; pixel.green+=cube_info->diffusion*previous[u+v].green/16; pixel.blue+=cube_info->diffusion*previous[u+v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=cube_info->diffusion*previous[u+v].alpha/16; } pixel.red+=5.0*cube_info->diffusion*previous[u].red/16; pixel.green+=5.0*cube_info->diffusion*previous[u].green/16; pixel.blue+=5.0*cube_info->diffusion*previous[u].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=5.0*cube_info->diffusion*previous[u].alpha/16; if (x > 0) { pixel.red+=3.0*cube_info->diffusion*previous[u-v].red/16; pixel.green+=3.0*cube_info->diffusion*previous[u-v].green/16; pixel.blue+=3.0*cube_info->diffusion*previous[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=3.0*cube_info->diffusion*previous[u-v].alpha/16; } } pixel.red=(double) ClampPixel(pixel.red); pixel.green=(double) ClampPixel(pixel.green); pixel.blue=(double) ClampPixel(pixel.blue); if (cube.associate_alpha != MagickFalse) pixel.alpha=(double) ClampPixel(pixel.alpha); i=CacheOffset(&cube,&pixel); if (cube.cache[i] < 0) { NodeInfo *node_info; size_t node_id; /* Identify the deepest node containing the pixel's color. */ node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { node_id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[node_id] == (NodeInfo *) NULL) break; node_info=node_info->child[node_id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+ 1.0); ClosestColor(image,&cube,node_info->parent); cube.cache[i]=(ssize_t) cube.color_number; } /* Assign pixel to closest colormap entry. 
*/ index=(size_t) cube.cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image)); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum(image->colormap[index].red), q+u*GetPixelChannels(image)); SetPixelGreen(image,ClampToQuantum(image->colormap[index].green), q+u*GetPixelChannels(image)); SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue), q+u*GetPixelChannels(image)); if (cube.associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha), q+u*GetPixelChannels(image)); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; /* Store the error. */ AssociateAlphaPixelInfo(&cube,image->colormap+index,&color); current[u].red=pixel.red-color.red; current[u].green=pixel.green-color.green; current[u].blue=pixel.blue-color.blue; if (cube.associate_alpha != MagickFalse) current[u].alpha=pixel.alpha-color.alpha; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } image_view=DestroyCacheView(image_view); pixels=DestroyPixelThreadSet(pixels); return(MagickTrue); } static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view, CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CubeInfo *p; DoublePixelPacket color, pixel; MagickBooleanType proceed; size_t index; p=cube_info; if ((p->x >= 0) && (p->x < (ssize_t) image->columns) && (p->y >= 0) && (p->y < (ssize_t) image->rows)) { Quantum *magick_restrict q; ssize_t i; /* Distribute error. */ q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception); if (q == (Quantum *) NULL) return(MagickFalse); AssociateAlphaPixel(image,cube_info,q,&pixel); for (i=0; i < ErrorQueueLength; i++) { pixel.red+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]* p->error[i].red; pixel.green+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]* p->error[i].green; pixel.blue+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]* p->error[i].blue; if (cube_info->associate_alpha != MagickFalse) pixel.alpha+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]* p->error[i].alpha; } pixel.red=(double) ClampPixel(pixel.red); pixel.green=(double) ClampPixel(pixel.green); pixel.blue=(double) ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) pixel.alpha=(double) ClampPixel(pixel.alpha); i=CacheOffset(cube_info,&pixel); if (p->cache[i] < 0) { NodeInfo *node_info; size_t id; /* Identify the deepest node containing the pixel's color. */ node_info=p->root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(cube_info,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ p->target=pixel; p->distance=(double) (4.0*(QuantumRange+1.0)*((double) QuantumRange+1.0)+1.0); ClosestColor(image,p,node_info->parent); p->cache[i]=(ssize_t) p->color_number; } /* Assign pixel to closest colormap entry. 
*/ index=(size_t) p->cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q); if (cube_info->quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q); SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q); SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q); if (cube_info->associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) return(MagickFalse); /* Propagate the error as the last entry of the error queue. */ (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)* sizeof(p->error[0])); AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color); p->error[ErrorQueueLength-1].red=pixel.red-color.red; p->error[ErrorQueueLength-1].green=pixel.green-color.green; p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue; if (cube_info->associate_alpha != MagickFalse) p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha; proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span); if (proceed == MagickFalse) return(MagickFalse); p->offset++; } switch (direction) { case WestGravity: p->x--; break; case EastGravity: p->x++; break; case NorthGravity: p->y--; break; case SouthGravity: p->y++; break; } return(MagickTrue); } static MagickBooleanType Riemersma(Image *image,CacheView *image_view, CubeInfo *cube_info,const size_t level,const unsigned int direction, ExceptionInfo *exception) { MagickBooleanType status; status=MagickTrue; if (level == 1) switch (direction) { case WestGravity: { status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); break; } case EastGravity: { status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); break; } case NorthGravity: { status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); break; } case SouthGravity: { status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); break; } default: break; } else switch (direction) { case WestGravity: { status=Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); if (status != MagickFalse) 
status=Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); break; } case EastGravity: { status=Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); break; } case NorthGravity: { status=Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); break; } case SouthGravity: { status=Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); break; } default: break; } return(status); } static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { CacheView *image_view; const char *artifact; MagickBooleanType status; size_t extent, level; artifact=GetImageArtifact(image,"dither:diffusion-amount"); if (artifact != (const char *) NULL) cube_info->diffusion=StringToDoubleInterval(artifact,1.0); if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod) return(FloydSteinbergDither(image,cube_info,exception)); /* Distribute quantization error along a Hilbert curve. 
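    Riemersma dithering visits pixels in Hilbert-curve order; level is chosen
    so that 2^level covers MagickMax(image->columns,image->rows), and the
    error of the most recent ErrorQueueLength pixels is carried along the
    curve with exponentially decaying weights.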
*/ (void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error)); cube_info->x=0; cube_info->y=0; extent=MagickMax(image->columns,image->rows); level=(size_t) log2((double) extent); if (((size_t) 1UL << level) < extent) level++; cube_info->offset=0; cube_info->span=(MagickSizeType) image->columns*image->rows; image_view=AcquireAuthenticCacheView(image,exception); status=MagickTrue; if (level > 0) status=Riemersma(image,image_view,cube_info,level,NorthGravity,exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t C u b e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetCubeInfo() initialize the Cube data structure. % % The format of the GetCubeInfo method is: % % CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info, % const size_t depth,const size_t maximum_colors) % % A description of each parameter follows. % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o depth: Normally, this integer value is zero or one. A zero or % one tells Quantize to choose a optimal tree depth of Log4(number_colors). % A tree of this depth generally allows the best representation of the % reference image with the least amount of memory and the fastest % computational speed. In some cases, such as an image with low color % dispersion (a few number of colors), a value other than % Log4(number_colors) is required. To expand the color tree completely, % use a value of 8. % % o maximum_colors: maximum colors. % */ static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info, const size_t depth,const size_t maximum_colors) { CubeInfo *cube_info; double weight; size_t length; ssize_t i; /* Initialize tree to describe color cube_info. */ cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info)); if (cube_info == (CubeInfo *) NULL) return((CubeInfo *) NULL); (void) memset(cube_info,0,sizeof(*cube_info)); cube_info->depth=depth; if (cube_info->depth > MaxTreeDepth) cube_info->depth=MaxTreeDepth; if (cube_info->depth < 2) cube_info->depth=2; cube_info->maximum_colors=maximum_colors; /* Initialize root node. */ cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL); if (cube_info->root == (NodeInfo *) NULL) return((CubeInfo *) NULL); cube_info->root->parent=cube_info->root; cube_info->quantize_info=CloneQuantizeInfo(quantize_info); if (cube_info->quantize_info->dither_method == NoDitherMethod) return(cube_info); /* Initialize dither resources. */ length=(size_t) (1UL << (4*(8-CacheShift))); cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache)); if (cube_info->memory_info == (MemoryInfo *) NULL) return((CubeInfo *) NULL); cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info); /* Initialize color cache. */ (void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length); /* Distribute weights along a curve of exponential decay. 
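    weight starts at 1.0 and grows geometrically to 1.0/ErrorRelativeWeight;
    each stored weight is its reciprocal, so cube_info->weights[] falls from
    1.0 down to ErrorRelativeWeight across the ErrorQueueLength entries.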
*/ weight=1.0; for (i=0; i < ErrorQueueLength; i++) { cube_info->weights[i]=PerceptibleReciprocal(weight); weight*=exp(log(1.0/ErrorRelativeWeight)/(ErrorQueueLength-1.0)); } cube_info->diffusion=1.0; return(cube_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t N o d e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNodeInfo() allocates memory for a new node in the color cube tree and % presets all fields to zero. % % The format of the GetNodeInfo method is: % % NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id, % const size_t level,NodeInfo *parent) % % A description of each parameter follows. % % o node: The GetNodeInfo method returns a pointer to a queue of nodes. % % o id: Specifies the child number of the node. % % o level: Specifies the level in the storage_class the node resides. % */ static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id, const size_t level,NodeInfo *parent) { NodeInfo *node_info; if (cube_info->free_nodes == 0) { Nodes *nodes; /* Allocate a new queue of nodes. */ nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes)); if (nodes == (Nodes *) NULL) return((NodeInfo *) NULL); nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList, sizeof(*nodes->nodes)); if (nodes->nodes == (NodeInfo *) NULL) return((NodeInfo *) NULL); nodes->next=cube_info->node_queue; cube_info->node_queue=nodes; cube_info->next_node=nodes->nodes; cube_info->free_nodes=NodesInAList; } cube_info->nodes++; cube_info->free_nodes--; node_info=cube_info->next_node++; (void) memset(node_info,0,sizeof(*node_info)); node_info->parent=parent; node_info->id=id; node_info->level=level; return(node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e Q u a n t i z e E r r o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageQuantizeError() measures the difference between the original % and quantized images. This difference is the total quantization error. % The error is computed by summing over all pixels in an image the distance % squared in RGB space between each reference pixel value and its quantized % value. These values are computed: % % o mean_error_per_pixel: This value is the mean error for any single % pixel in the image. % % o normalized_mean_square_error: This value is the normalized mean % quantization error for any single pixel in the image. This distance % measure is normalized to a range between 0 and 1. It is independent % of the range of red, green, and blue values in the image. % % o normalized_maximum_square_error: Thsi value is the normalized % maximum quantization error for any single pixel in the image. This % distance measure is normalized to a range between 0 and 1. It is % independent of the range of red, green, and blue values in your image. % % The format of the GetImageQuantizeError method is: % % MagickBooleanType GetImageQuantizeError(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o exception: return any errors or warnings in this structure. 
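%
%  A minimal sketch of reading the metrics after quantization (illustrative;
%  assumes the image was quantized with measure_error enabled, or call the
%  method directly as shown):
%
%    (void) GetImageQuantizeError(image,exception);
%    (void) FormatLocaleFile(stdout,"mean/pixel %g, normalized mean %g, "
%      "normalized maximum %g\n",image->error.mean_error_per_pixel,
%      image->error.normalized_mean_error,
%      image->error.normalized_maximum_error);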
% */ MagickExport MagickBooleanType GetImageQuantizeError(Image *image, ExceptionInfo *exception) { CacheView *image_view; double alpha, area, beta, distance, maximum_error, mean_error, mean_error_per_pixel; ssize_t index, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); image->total_colors=GetNumberColors(image,(FILE *) NULL,exception); (void) memset(&image->error,0,sizeof(image->error)); if (image->storage_class == DirectClass) return(MagickTrue); alpha=1.0; beta=1.0; area=3.0*image->columns*image->rows; maximum_error=0.0; mean_error_per_pixel=0.0; mean_error=0.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { index=(ssize_t) GetPixelIndex(image,p); if (image->alpha_trait != UndefinedPixelTrait) { alpha=(double) (QuantumScale*GetPixelAlpha(image,p)); beta=(double) (QuantumScale*image->colormap[index].alpha); } distance=fabs((double) (alpha*GetPixelRed(image,p)-beta* image->colormap[index].red)); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta* image->colormap[index].green)); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta* image->colormap[index].blue)); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area; image->error.normalized_mean_error=(double) QuantumScale*QuantumScale* mean_error/area; image->error.normalized_maximum_error=(double) QuantumScale*maximum_error; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetQuantizeInfo() initializes the QuantizeInfo structure. % % The format of the GetQuantizeInfo method is: % % GetQuantizeInfo(QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to a QuantizeInfo structure. % */ MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(quantize_info != (QuantizeInfo *) NULL); (void) memset(quantize_info,0,sizeof(*quantize_info)); quantize_info->number_colors=256; quantize_info->dither_method=RiemersmaDitherMethod; quantize_info->colorspace=UndefinedColorspace; quantize_info->measure_error=MagickFalse; quantize_info->signature=MagickCoreSignature; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % K m e a n s I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % KmeansImage() applies k-means color reduction to an image. This is a % colorspace clustering or segmentation technique. 
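%
%  Each iteration assigns every pixel to the colormap entry with the smallest
%  squared distance (as computed by KmeansMetric below) and then replaces each
%  entry with the mean of the pixels assigned to it; iteration stops after
%  max_iterations passes or when the change in total distortion is within
%  tolerance.  A hedged invocation sketch (the counts are illustrative):
%
%    (void) KmeansImage(image,16,100,0.01,exception);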
% % The format of the KmeansImage method is: % % MagickBooleanType KmeansImage(Image *image,const size_t number_colors, % const size_t max_iterations,const double tolerance, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o number_colors: number of colors to use as seeds. % % o max_iterations: maximum number of iterations while converging. % % o tolerance: the maximum tolerance. % % o exception: return any errors or warnings in this structure. % */ typedef struct _KmeansInfo { double red, green, blue, alpha, black, count, distortion; } KmeansInfo; static KmeansInfo **DestroyKmeansThreadSet(KmeansInfo **kmeans_info) { ssize_t i; assert(kmeans_info != (KmeansInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (kmeans_info[i] != (KmeansInfo *) NULL) kmeans_info[i]=(KmeansInfo *) RelinquishMagickMemory(kmeans_info[i]); kmeans_info=(KmeansInfo **) RelinquishMagickMemory(kmeans_info); return(kmeans_info); } static KmeansInfo **AcquireKmeansThreadSet(const size_t number_colors) { KmeansInfo **kmeans_info; ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); kmeans_info=(KmeansInfo **) AcquireQuantumMemory(number_threads, sizeof(*kmeans_info)); if (kmeans_info == (KmeansInfo **) NULL) return((KmeansInfo **) NULL); (void) memset(kmeans_info,0,number_threads*sizeof(*kmeans_info)); for (i=0; i < (ssize_t) number_threads; i++) { kmeans_info[i]=(KmeansInfo *) AcquireQuantumMemory(number_colors, sizeof(**kmeans_info)); if (kmeans_info[i] == (KmeansInfo *) NULL) return(DestroyKmeansThreadSet(kmeans_info)); } return(kmeans_info); } static inline double KmeansMetric(const Image *magick_restrict image, const Quantum *magick_restrict p,const PixelInfo *magick_restrict q) { double gamma, metric, pixel; gamma=1.0; metric=0.0; if ((image->alpha_trait != UndefinedPixelTrait) || (q->alpha_trait != UndefinedPixelTrait)) { pixel=GetPixelAlpha(image,p)-(q->alpha_trait != UndefinedPixelTrait ? 
q->alpha : OpaqueAlpha); metric+=pixel*pixel; if (image->alpha_trait != UndefinedPixelTrait) gamma*=QuantumScale*GetPixelAlpha(image,p); if (q->alpha_trait != UndefinedPixelTrait) gamma*=QuantumScale*q->alpha; } if (image->colorspace == CMYKColorspace) { pixel=QuantumScale*(GetPixelBlack(image,p)-q->black); metric+=gamma*pixel*pixel; gamma*=QuantumScale*(QuantumRange-GetPixelBlack(image,p)); gamma*=QuantumScale*(QuantumRange-q->black); } metric*=3.0; pixel=QuantumScale*(GetPixelRed(image,p)-q->red); if (IsHueCompatibleColorspace(image->colorspace) != MagickFalse) { if (fabs((double) pixel) > 0.5) pixel-=0.5; pixel*=2.0; } metric+=gamma*pixel*pixel; pixel=QuantumScale*(GetPixelGreen(image,p)-q->green); metric+=gamma*pixel*pixel; pixel=QuantumScale*(GetPixelBlue(image,p)-q->blue); metric+=gamma*pixel*pixel; return(metric); } MagickExport MagickBooleanType KmeansImage(Image *image, const size_t number_colors,const size_t max_iterations,const double tolerance, ExceptionInfo *exception) { #define KmeansImageTag "Kmeans/Image" #define RandomColorComponent(info) (QuantumRange*GetPseudoRandomValue(info)) CacheView *image_view; const char *colors; double previous_tolerance; KmeansInfo **kmeans_pixels; MagickBooleanType verbose, status; ssize_t n; size_t number_threads; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); colors=GetImageArtifact(image,"kmeans:seed-colors"); if (colors == (const char *) NULL) { CubeInfo *cube_info; QuantizeInfo *quantize_info; size_t depth; /* Seed clusters from color quantization. */ quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL); quantize_info->colorspace=image->colorspace; quantize_info->number_colors=number_colors; quantize_info->dither_method=NoDitherMethod; n=number_colors; for (depth=1; n != 0; depth++) n>>=2; cube_info=GetCubeInfo(quantize_info,depth,number_colors); if (cube_info == (CubeInfo *) NULL) { quantize_info=DestroyQuantizeInfo(quantize_info); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } status=ClassifyImageColors(cube_info,image,exception); if (status != MagickFalse) { if (cube_info->colors > cube_info->maximum_colors) ReduceImageColors(image,cube_info); status=SetImageColormap(image,cube_info,exception); } DestroyCubeInfo(cube_info); quantize_info=DestroyQuantizeInfo(quantize_info); if (status == MagickFalse) return(status); } else { char color[MagickPathExtent]; const char *p; /* Seed clusters from color list (e.g. red;green;blue). */ status=AcquireImageColormap(image,number_colors,exception); if (status == MagickFalse) return(status); for (n=0, p=colors; n < (ssize_t) image->colors; n++) { const char *q; for (q=p; *q != '\0'; q++) if (*q == ';') break; (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1, MagickPathExtent)); (void) QueryColorCompliance(color,AllCompliance,image->colormap+n, exception); if (*q == '\0') { n++; break; } p=q+1; } if (n < (ssize_t) image->colors) { RandomInfo *random_info; /* Seed clusters from random values. 
*/ random_info=AcquireRandomInfo(); for ( ; n < (ssize_t) image->colors; n++) { (void) QueryColorCompliance("#000",AllCompliance,image->colormap+n, exception); image->colormap[n].red=RandomColorComponent(random_info); image->colormap[n].green=RandomColorComponent(random_info); image->colormap[n].blue=RandomColorComponent(random_info); if (image->alpha_trait != UndefinedPixelTrait) image->colormap[n].alpha=RandomColorComponent(random_info); if (image->colorspace == CMYKColorspace) image->colormap[n].black=RandomColorComponent(random_info); } random_info=DestroyRandomInfo(random_info); } } /* Iterative refinement. */ kmeans_pixels=AcquireKmeansThreadSet(number_colors); if (kmeans_pixels == (KmeansInfo **) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); previous_tolerance=0.0; verbose=IsStringTrue(GetImageArtifact(image,"debug")); number_threads=(size_t) GetMagickResourceLimit(ThreadResource); image_view=AcquireAuthenticCacheView(image,exception); for (n=0; n < (ssize_t) max_iterations; n++) { double distortion; ssize_t j, y; for (j=0; j < (ssize_t) number_threads; j++) (void) memset(kmeans_pixels[j],0,image->colors*sizeof(*kmeans_pixels[j])); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double min_distance; ssize_t i, k; /* Assign each pixel whose mean has the least squared color distance. */ k=0; min_distance=KmeansMetric(image,q,image->colormap+0); for (i=1; i < (ssize_t) image->colors; i++) { double distance; if (min_distance <= MagickEpsilon) break; distance=KmeansMetric(image,q,image->colormap+i); if (distance < min_distance) { min_distance=distance; k=i; } } kmeans_pixels[id][k].red+=QuantumScale*GetPixelRed(image,q); kmeans_pixels[id][k].green+=QuantumScale*GetPixelGreen(image,q); kmeans_pixels[id][k].blue+=QuantumScale*GetPixelBlue(image,q); if (image->alpha_trait != UndefinedPixelTrait) kmeans_pixels[id][k].alpha+=QuantumScale*GetPixelAlpha(image,q); if (image->colorspace == CMYKColorspace) kmeans_pixels[id][k].black+=QuantumScale*GetPixelBlack(image,q); kmeans_pixels[id][k].count++; kmeans_pixels[id][k].distortion+=min_distance; SetPixelIndex(image,(Quantum) k,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } if (status == MagickFalse) break; /* Reduce sums to [0] entry. */ for (j=1; j < (ssize_t) number_threads; j++) { ssize_t k; for (k=0; k < (ssize_t) image->colors; k++) { kmeans_pixels[0][k].red+=kmeans_pixels[j][k].red; kmeans_pixels[0][k].green+=kmeans_pixels[j][k].green; kmeans_pixels[0][k].blue+=kmeans_pixels[j][k].blue; if (image->alpha_trait != UndefinedPixelTrait) kmeans_pixels[0][k].alpha+=kmeans_pixels[j][k].alpha; if (image->colorspace == CMYKColorspace) kmeans_pixels[0][k].black+=kmeans_pixels[j][k].black; kmeans_pixels[0][k].count+=kmeans_pixels[j][k].count; kmeans_pixels[0][k].distortion+=kmeans_pixels[j][k].distortion; } } /* Calculate the new means (centroids) of the pixels in the new clusters. 
*/ distortion=0.0; for (j=0; j < (ssize_t) image->colors; j++) { double gamma; gamma=PerceptibleReciprocal((double) kmeans_pixels[0][j].count); image->colormap[j].red=gamma*QuantumRange*kmeans_pixels[0][j].red; image->colormap[j].green=gamma*QuantumRange*kmeans_pixels[0][j].green; image->colormap[j].blue=gamma*QuantumRange*kmeans_pixels[0][j].blue; if (image->alpha_trait != UndefinedPixelTrait) image->colormap[j].alpha=gamma*QuantumRange*kmeans_pixels[0][j].alpha; if (image->colorspace == CMYKColorspace) image->colormap[j].black=gamma*QuantumRange*kmeans_pixels[0][j].black; distortion+=kmeans_pixels[0][j].distortion; } if (verbose != MagickFalse) (void) FormatLocaleFile(stderr,"distortion[%.20g]: %*g %*g\n",(double) n, GetMagickPrecision(),distortion,GetMagickPrecision(), fabs(distortion-previous_tolerance)); if (fabs(distortion-previous_tolerance) <= tolerance) break; previous_tolerance=distortion; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,KmeansImageTag,(MagickOffsetType) n, max_iterations); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); kmeans_pixels=DestroyKmeansThreadSet(kmeans_pixels); if (image->progress_monitor != (MagickProgressMonitor) NULL) (void) SetImageProgress(image,KmeansImageTag,(MagickOffsetType) max_iterations-1,max_iterations); if (status == MagickFalse) return(status); return(SyncImage(image,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o s t e r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PosterizeImage() reduces the image to a limited number of colors for a % "poster" effect. % % The format of the PosterizeImage method is: % % MagickBooleanType PosterizeImage(Image *image,const size_t levels, % const DitherMethod dither_method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: Specifies a pointer to an Image structure. % % o levels: Number of color levels allowed in each channel. Very low values % (2, 3, or 4) have the most visible effect. % % o dither_method: choose from UndefinedDitherMethod, NoDitherMethod, % RiemersmaDitherMethod, FloydSteinbergDitherMethod. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. 
*/ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels, const DitherMethod dither_method,ExceptionInfo *exception) { #define PosterizeImageTag "Posterize/Image" #define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \ MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1)) CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; QuantizeInfo *quantize_info; ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (image->storage_class == PseudoClass) #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->colors,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { /* Posterize colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) PosterizePixel(image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) PosterizePixel(image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) PosterizePixel(image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) PosterizePixel(image->colormap[i].alpha); } /* Posterize image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q); if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q); if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL); quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels* levels,MaxColormapSize+1); quantize_info->dither_method=dither_method; 
quantize_info->tree_depth=MaxTreeDepth; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e C h i l d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneChild() deletes the given node and merges its statistics into its % parent. % % The format of the PruneSubtree method is: % % PruneChild(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info) { NodeInfo *parent; size_t number_children; ssize_t i; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneChild(cube_info,node_info->child[i]); if (cube_info->nodes > cube_info->maximum_colors) { /* Merge color statistics into parent. */ parent=node_info->parent; parent->number_unique+=node_info->number_unique; parent->total_color.red+=node_info->total_color.red; parent->total_color.green+=node_info->total_color.green; parent->total_color.blue+=node_info->total_color.blue; parent->total_color.alpha+=node_info->total_color.alpha; parent->child[node_info->id]=(NodeInfo *) NULL; cube_info->nodes--; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e L e v e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneLevel() deletes all nodes at the bottom level of the color tree merging % their color statistics into their parent node. % % The format of the PruneLevel method is: % % PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info) { size_t number_children; ssize_t i; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneLevel(cube_info,node_info->child[i]); if (node_info->level == cube_info->depth) PruneChild(cube_info,node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e T o C u b e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneToCubeDepth() deletes any nodes at a depth greater than % cube_info->depth while merging their color statistics into their parent % node. % % The format of the PruneToCubeDepth method is: % % PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info) { size_t number_children; ssize_t i; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 
8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneToCubeDepth(cube_info,node_info->child[i]); if (node_info->level > cube_info->depth) PruneChild(cube_info,node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImage() analyzes the colors within a reference image and chooses a % fixed number of colors to represent the image. The goal of the algorithm % is to minimize the color difference between the input and output image while % minimizing the processing time. % % The format of the QuantizeImage method is: % % MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, % Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, Image *image,ExceptionInfo *exception) { CubeInfo *cube_info; ImageType type; MagickBooleanType status; size_t depth, maximum_colors; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; type=IdentifyImageType(image,exception); if (IsGrayImageType(type) != MagickFalse) (void) SetGrayscaleImage(image,exception); depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2)) depth--; if ((image->alpha_trait != UndefinedPixelTrait) && (depth > 5)) depth--; if (IsGrayImageType(type) != MagickFalse) depth=MaxTreeDepth; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,image,exception); if (status != MagickFalse) { /* Reduce the number of colors in the image. */ if (cube_info->colors > cube_info->maximum_colors) ReduceImageColors(image,cube_info); status=AssignImageColors(image,cube_info,exception); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImages() analyzes the colors within a set of reference images and % chooses a fixed number of colors to represent the set. The goal of the % algorithm is to minimize the color difference between the input and output % images while minimizing the processing time. 
% % The format of the QuantizeImages method is: % % MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, % Image *images,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: Specifies a pointer to a list of Image structures. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, Image *images,ExceptionInfo *exception) { CubeInfo *cube_info; Image *image; MagickBooleanType proceed, status; MagickProgressMonitor progress_monitor; size_t depth, maximum_colors, number_images; ssize_t i; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (GetNextImageInList(images) == (Image *) NULL) { /* Handle a single image with QuantizeImage. */ status=QuantizeImage(quantize_info,images,exception); return(status); } status=MagickFalse; maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if (quantize_info->dither_method != NoDitherMethod) depth--; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return(MagickFalse); } number_images=GetImageListLength(images); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL, image->client_data); status=ClassifyImageColors(cube_info,image,exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor,image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } if (status != MagickFalse) { /* Reduce the number of colors in an image sequence. */ ReduceImageColors(images,cube_info); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,image->client_data); status=AssignImageColors(image,cube_info,exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor, image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u a n t i z e E r r o r F l a t t e n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeErrorFlatten() traverses the color cube and flattens the quantization % error into a sorted 1D array. This accelerates the color reduction process. 
% % Contributed by Yoya. % % The format of the QuantizeErrorFlatten method is: % % size_t QuantizeErrorFlatten(const CubeInfo *cube_info, % const NodeInfo *node_info,const ssize_t offset, % double *quantize_error) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is current pointer. % % o offset: quantize error offset. % % o quantize_error: the quantization error vector. % */ static size_t QuantizeErrorFlatten(const CubeInfo *cube_info, const NodeInfo *node_info,const ssize_t offset,double *quantize_error) { size_t n, number_children; ssize_t i; if (offset >= (ssize_t) cube_info->nodes) return(0); quantize_error[offset]=node_info->quantize_error; n=1; number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children ; i++) if (node_info->child[i] != (NodeInfo *) NULL) n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n, quantize_error); return(n); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Reduce() traverses the color cube tree and prunes any node whose % quantization error falls below a particular threshold. % % The format of the Reduce method is: % % Reduce(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info) { size_t number_children; ssize_t i; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) Reduce(cube_info,node_info->child[i]); if (node_info->quantize_error <= cube_info->pruning_threshold) PruneChild(cube_info,node_info); else { /* Find minimum pruning threshold. */ if (node_info->number_unique > 0) cube_info->colors++; if (node_info->quantize_error < cube_info->next_threshold) cube_info->next_threshold=node_info->quantize_error; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReduceImageColors() repeatedly prunes the tree until the number of nodes % with n2 > 0 is less than or equal to the maximum number of colors allowed % in the output image. On any given iteration over the tree, it selects % those nodes whose E value is minimal for pruning and merges their % color statistics upward. It uses a pruning threshold, Ep, to govern % node selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. 
% % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors % within the cubic volume which the node represents. This includes n1 - % n2 pixels whose colors should be defined by nodes at a lower level in % the tree. % % The format of the ReduceImageColors method is: % % ReduceImageColors(const Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % */ static int QuantizeErrorCompare(const void *error_p,const void *error_q) { double *p, *q; p=(double *) error_p; q=(double *) error_q; if (*p > *q) return(1); if (fabs(*q-*p) <= MagickEpsilon) return(0); return(-1); } static void ReduceImageColors(const Image *image,CubeInfo *cube_info) { #define ReduceImageTag "Reduce/Image" MagickBooleanType proceed; MagickOffsetType offset; size_t span; cube_info->next_threshold=0.0; if (cube_info->colors > cube_info->maximum_colors) { double *quantize_error; /* Enable rapid reduction of the number of unique colors. */ quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes, sizeof(*quantize_error)); if (quantize_error != (double *) NULL) { (void) QuantizeErrorFlatten(cube_info,cube_info->root,0, quantize_error); qsort(quantize_error,cube_info->nodes,sizeof(double), QuantizeErrorCompare); if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100)) cube_info->next_threshold=quantize_error[cube_info->nodes-110* (cube_info->maximum_colors+1)/100]; quantize_error=(double *) RelinquishMagickMemory(quantize_error); } } for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; ) { cube_info->pruning_threshold=cube_info->next_threshold; cube_info->next_threshold=cube_info->root->quantize_error-1; cube_info->colors=0; Reduce(cube_info,cube_info->root); offset=(MagickOffsetType) span-cube_info->colors; proceed=SetImageProgress(image,ReduceImageTag,offset,span- cube_info->maximum_colors+1); if (proceed == MagickFalse) break; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImage() replaces the colors of an image with the closest of the colors % from the reference image. % % The format of the RemapImage method is: % % MagickBooleanType RemapImage(const QuantizeInfo *quantize_info, % Image *image,const Image *remap_image,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. % % o remap_image: the reference image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info, Image *image,const Image *remap_image,ExceptionInfo *exception) { CubeInfo *cube_info; MagickBooleanType status; /* Initialize color cube. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(remap_image != (Image *) NULL); assert(remap_image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,exception); if (status != MagickFalse) { /* Classify image colors from the reference image. */ cube_info->quantize_info->number_colors=cube_info->colors; status=AssignImageColors(image,cube_info,exception); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImages() replaces the colors of a sequence of images with the % closest color from a reference image. % % The format of the RemapImage method is: % % MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, % Image *images,Image *remap_image,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: the image sequence. % % o remap_image: the reference image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, Image *images,const Image *remap_image,ExceptionInfo *exception) { CubeInfo *cube_info; Image *image; MagickBooleanType status; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=images; if (remap_image == (Image *) NULL) { /* Create a global colormap for an image sequence. */ status=QuantizeImages(quantize_info,images,exception); return(status); } /* Classify image colors from the reference image. */ cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,exception); if (status != MagickFalse) { /* Classify image colors from the reference image. */ cube_info->quantize_info->number_colors=cube_info->colors; image=images; for ( ; image != (Image *) NULL; image=GetNextImageInList(image)) { status=AssignImageColors(image,cube_info,exception); if (status == MagickFalse) break; } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t G r a y s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetGrayscaleImage() converts an image to a PseudoClass grayscale image. % % The format of the SetGrayscaleImage method is: % % MagickBooleanType SetGrayscaleImage(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. 
% % o exception: return any errors or warnings in this structure. % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { double intensity; PixelInfo *color_1, *color_2; color_1=(PixelInfo *) x; color_2=(PixelInfo *) y; intensity=GetPixelInfoIntensity((const Image *) NULL,color_1)- GetPixelInfoIntensity((const Image *) NULL,color_2); if (intensity < (double) INT_MIN) intensity=(double) INT_MIN; if (intensity > (double) INT_MAX) intensity=(double) INT_MAX; return((int) intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static MagickBooleanType SetGrayscaleImage(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; PixelInfo *colormap; size_t extent; ssize_t *colormap_index, i, j, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->type != GrayscaleType) (void) TransformImageColorspace(image,GRAYColorspace,exception); extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1)); colormap_index=(ssize_t *) AcquireQuantumMemory(extent, sizeof(*colormap_index)); if (colormap_index == (ssize_t *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); if (image->storage_class != PseudoClass) { (void) memset(colormap_index,(-1),extent*sizeof(*colormap_index)); if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse) { colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } image->colors=0; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { size_t intensity; intensity=ScaleQuantumToMap(GetPixelRed(image,q)); if (colormap_index[intensity] < 0) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SetGrayscaleImage) #endif if (colormap_index[intensity] < 0) { colormap_index[intensity]=(ssize_t) image->colors; image->colormap[image->colors].red=(double) GetPixelRed(image,q); image->colormap[image->colors].green=(double) GetPixelGreen(image,q); image->colormap[image->colors].blue=(double) GetPixelBlue(image,q); image->colors++; } } SetPixelIndex(image,(Quantum) colormap_index[intensity],q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); } (void) memset(colormap_index,0,extent*sizeof(*colormap_index)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].alpha=(double) i; qsort((void *) image->colormap,image->colors,sizeof(PixelInfo), IntensityCompare); colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap)); if (colormap == (PixelInfo *) NULL) { colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } j=0; colormap[j]=image->colormap[0]; for (i=0; i < (ssize_t) image->colors; i++) { if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == 
MagickFalse) { j++; colormap[j]=image->colormap[i]; } colormap_index[(ssize_t) image->colormap[i].alpha]=j; } image->colors=(size_t) (j+1); image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap); image->colormap=colormap; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap( GetPixelIndex(image,q))],q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); image->type=GrayscaleType; if (SetImageMonochrome(image,exception) != MagickFalse) image->type=BilevelType; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColormap() traverses the color cube tree and sets the colormap of % the image. A colormap entry is any node in the color cube tree where the % of unique colors is not zero. % % The format of the SetImageColormap method is: % % MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info, % ExceptionInfo *node_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % % o exception: return any errors or warnings in this structure. % */ MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { size_t number_colors; number_colors=MagickMax(cube_info->maximum_colors,cube_info->colors); if (AcquireImageColormap(image,number_colors,exception) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); image->colors=0; DefineImageColormap(image,cube_info,cube_info->root); if (image->colors != number_colors) { image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap, image->colors+1,sizeof(*image->colormap)); if (image->colormap == (PixelInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } return(MagickTrue); }
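/*
  Usage sketch (illustrative, not part of MagickCore): a minimal example of
  driving the QuantizeImage() API defined above, assuming an already-acquired
  `image` and `exception` and omitting error handling.  The QuantizeInfo
  accessors mirror the calls PosterizeImage() makes earlier in this file; on
  failure the reason is reported through `exception`.

      QuantizeInfo *quantize_info = AcquireQuantizeInfo((ImageInfo *) NULL);
      quantize_info->number_colors = 256;
      quantize_info->dither_method = FloydSteinbergDitherMethod;
      MagickBooleanType ok = QuantizeImage(quantize_info, image, exception);
      quantize_info = DestroyQuantizeInfo(quantize_info);
*/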
hello_openmp.c
/******************************************************************************
* FILE: omp_hello.c
* DESCRIPTION:
*   OpenMP Example - Hello World - C/C++ Version
*   In this simple example, the master thread forks a parallel region.
*   All threads in the team obtain their unique thread number and print it.
*   The master thread only prints the total number of threads.  Two OpenMP
*   library routines are used to obtain the number of threads and each
*   thread's number.
* AUTHOR: Blaise Barney  5/99
* LAST REVISED: 04/06/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main (int argc, char *argv[])
{
  int nthreads, tid;

  /* Fork a team of threads giving them their own copies of variables */
  #pragma omp parallel private(nthreads, tid)
  {
    /* Obtain thread number */
    tid = omp_get_thread_num();
    printf("Hello World from thread = %d\n", tid);

    /* Only master thread does this */
    if (tid == 0)
      {
      nthreads = omp_get_num_threads();
      printf("Number of threads = %d\n", nthreads);
      }
  }  /* All threads join master thread and disband */
}
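/*
  Build/run sketch: with GCC, or any compiler that accepts the -fopenmp flag,
  the example can typically be compiled and executed as shown below.  The
  OMP_NUM_THREADS environment variable sets the size of the thread team, and
  the print order is nondeterministic because every thread writes
  independently.

      gcc -fopenmp omp_hello.c -o omp_hello
      OMP_NUM_THREADS=4 ./omp_hello
*/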
GB_binop__bset_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bset_int64) // A.*B function (eWiseMult): GB (_AemultB_01__bset_int64) // A.*B function (eWiseMult): GB (_AemultB_02__bset_int64) // A.*B function (eWiseMult): GB (_AemultB_03__bset_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_int64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bset_int64) // C+=b function (dense accum): GB (_Cdense_accumb__bset_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_int64) // C=scalar+B GB (_bind1st__bset_int64) // C=scalar+B' GB (_bind1st_tran__bset_int64) // C=A+scalar GB (_bind2nd__bset_int64) // C=A'+scalar GB (_bind2nd_tran__bset_int64) // C type: int64_t // A type: int64_t // B,b type: int64_t // BinaryOp: cij = GB_BITSET (aij, bij, int64_t, 64) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_BITSET (x, y, int64_t, 64) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BSET || GxB_NO_INT64 || GxB_NO_BSET_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bset_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bset_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bset_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bset_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__bset_int64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bset_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__bset_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bset_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bset_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_BITSET (x, bij, int64_t, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bset_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_BITSET (aij, y, int64_t, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITSET (x, aij, int64_t, 64) ; \ } GrB_Info GB (_bind1st_tran__bset_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict 
*Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITSET (aij, y, int64_t, 64) ; \ } GrB_Info GB (_bind2nd_tran__bset_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
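/*
  Usage sketch (illustrative, not part of the generated code): these kernels
  are normally reached through the user-level eWise API rather than called
  directly.  Assuming the built-in operator name GxB_BSET_INT64 (implied by
  the GxB_NO_BSET_INT64 control flag used in GB_DISABLE above), a call such as

      GrB_Info info ;
      info = GrB_Matrix_eWiseAdd_BinaryOp (C, NULL, NULL, GxB_BSET_INT64,
          A, B, NULL) ;

  is dispatched to GB (_AaddB__bset_int64) for int64_t matrices, per the
  function list in the header comment of this file.
*/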
_rsky.c
/* The batman package: fast computation of exoplanet transit light curves * Copyright (C) 2015 Laura Kreidberg * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION #include <Python.h> #include "numpy/arrayobject.h" #if defined (_OPENACC) && defined(__PGI) # include <accelmath.h> #else # include <math.h> #endif #if defined (_OPENMP) && !defined(_OPENACC) # include <omp.h> #endif #ifndef M_PI #define M_PI 3.14159265358979323846 #endif #define MIN(x, y) (((x) < (y)) ? (x) : (y)) #define MAX(x, y) (((x) > (y)) ? (x) : (y)) static PyObject *_rsky(PyObject *self, PyObject *args); static PyObject *_getf(PyObject *self, PyObject *args); inline double getE(double M, double e) //calculates the eccentric anomaly (see Seager Exoplanets book: Murray & Correia eqn. 5 -- see section 3) { double E = M, eps = 1.0e-7; double fe, fs; // modification from LK 05/07/2017: // add fmod to ensure convergence for diabolical inputs (following Eastman et al. 2013; Section 3.1) while(fmod(fabs(E - e*sin(E) - M), 2.*M_PI) > eps) { fe = fmod(E - e*sin(E) - M, 2.*M_PI); fs = fmod(1 - e*cos(E), 2.*M_PI); E = E - fe/fs; } return E; } static PyObject *_rsky_or_f(PyObject *self, PyObject *args, int f_only) { /* This module computes the distance between the centers of the star and the planet in the plane of the sky. This parameter is denoted r_sky = sqrt(x^2 + y^2) in the Seager Exoplanets book (see the section by Murray, and Winn eq. 5). In the Mandel & Agol (2002) paper, this quantity is denoted d. If f_only is 1, this function returns the true anomaly instead of the distance. */ double ecc, inc, a, omega, per, tc, BIGD = 100.; int transittype, nthreads;; npy_intp dims[1]; PyArrayObject *ts, *ds; if(!PyArg_ParseTuple(args,"Oddddddii", &ts, &tc, &per, &a, &inc, &ecc, &omega, &transittype, &nthreads)) return NULL; dims[0] = PyArray_DIMS(ts)[0]; ds = (PyArrayObject *) PyArray_SimpleNew(1, dims, PyArray_TYPE(ts)); double *t_array = PyArray_DATA(ts); double *output_array = PyArray_DATA(ds); const double n = 2.*M_PI/per; // mean motion const double eps = 1.0e-7; #if defined (_OPENMP) && !defined(_OPENACC) omp_set_num_threads(nthreads); //specifies number of threads (if OpenMP is supported) #endif #if defined (_OPENACC) #pragma acc parallel loop copyin(t_array[:dims[0]]) copyout(output_array[:dims[0]]) #elif defined (_OPENMP) #pragma omp parallel for #endif for(int i = 0; i < dims[0]; i++) { double t = t_array[i]; //calculates time of periastron passage from time of inferior conjunction double f = M_PI/2. - omega; //true anomaly corresponding to time of primary transit center double E = 2.*atan(sqrt((1. - ecc)/(1. 
+ ecc))*tan(f/2.)); //corresponding eccentric anomaly double M = E - ecc*sin(E); double tp = tc - per*M/2./M_PI; //time of periastron if(ecc < 1.0e-5) { f = ((t - tp)/per - (int)((t - tp)/per))*2.*M_PI; //calculates f for a circular orbit } else { M = n*(t - tp); E = getE(M, ecc); f = 2.*atan(sqrt((1.+ecc)/(1.-ecc))*tan(E/2.)); } if (f_only) { output_array[i] = f; } else { double d; if (transittype == 1 && sin(f + omega)*sin(inc) <= 0.) d = BIGD; //z < 0, so d is set to large value in order to not model primary transit during secondary eclipse else if (transittype == 2 && sin(f + omega)*sin(inc) >= 0.) d = BIGD; //z > 0, so d is set to large value in order not to model secondary eclipse during primary transit else d = a*(1.0 - ecc*ecc)/(1.0 + ecc*cos(f))*sqrt(1.0 - sin(omega + f)*sin(omega + f)*sin(inc)*sin(inc)); //calculates separation of centers output_array[i] = d; } } return PyArray_Return((PyArrayObject *)ds); } static PyObject *_rsky(PyObject *self, PyObject *args) { return _rsky_or_f(self, args, 0); } static PyObject *_getf(PyObject *self, PyObject *args) { return _rsky_or_f(self, args, 1); } static char _rsky_doc[] = """ This module computes the distance between the centers of the \ star and the planet in the plane of the sky. This parameter is \ denoted r_sky = sqrt(x^2 + y^2) in the Seager Exoplanets book \ (see the section by Murray, and Winn eq. 5). In the Mandel & Agol (2002) paper, \ this quantity is denoted d.\ LK 4/27/12 """; static char _getf_doc[] = """ This module computes the true anomaly. This parameter is \ denoted f in the Seager Exoplanets book \ (see the section by Murray, and Winn eq. 44).\ BM 1/18/16 """; static PyMethodDef _rsky_methods[] = { {"_rsky", _rsky,METH_VARARGS,_rsky_doc},{"_getf", _getf,METH_VARARGS,_getf_doc},{NULL}}; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef _rsky_module = { PyModuleDef_HEAD_INIT, "_rsky", _rsky_doc, -1, _rsky_methods }; PyMODINIT_FUNC PyInit__rsky(void) { PyObject* module = PyModule_Create(&_rsky_module); if(!module) { return NULL; } import_array(); return module; } #else void init_rsky(void) { Py_InitModule("_rsky", _rsky_methods); import_array(); } #endif
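/*
  Note on getE() above: it solves Kepler's equation, M = E - e*sin(E), for the
  eccentric anomaly E by Newton-Raphson iteration.  Each pass of the while
  loop applies

      E_{n+1} = E_n - (E_n - e*sin(E_n) - M) / (1 - e*cos(E_n))

  with fe and fs holding the residual and its derivative, and iteration stops
  once the residual (taken mod 2*pi, per Eastman et al. 2013) drops below
  eps = 1.0e-7.
*/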