source
stringlengths
3
92
c
stringlengths
26
2.25M
GB_unop__identity_int8_int32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_int8_int32) // op(A') function: GB (_unop_tran__identity_int8_int32) // C type: int8_t // A type: int32_t // cast: int8_t cij = (int8_t) aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int8_t z = (int8_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int8_t z = (int8_t) aij ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_int8_int32) ( int8_t *Cx, // Cx and Ax may be aliased const int32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int32_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; int8_t z = (int8_t) aij ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int32_t aij = Ax [p] ; int8_t z = (int8_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int8_int32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
taskloop_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -triple x86_64-unknown-unknown -verify %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -triple x86_64-unknown-unknown -verify %s -Wuninitialized // expected-error@+1 {{unexpected OpenMP directive '#pragma omp taskloop simd'}} #pragma omp taskloop simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp taskloop simd'}} #pragma omp taskloop simd foo void test_no_clause() { int i; #pragma omp taskloop simd for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp taskloop simd' must be a for loop}} #pragma omp taskloop simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp parallel #pragma omp taskloop simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}} #pragma omp taskloop simd foo bar for (i = 0; i < 16; ++i) ; // expected-error@+1 {{directive '#pragma omp taskloop simd' cannot contain more than one 'nogroup' clause}} #pragma omp taskloop simd nogroup nogroup for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}} #pragma omp taskloop simd; for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}} #pragma omp parallel #pragma omp taskloop simd linear(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}} #pragma omp 
taskloop simd private(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}} #pragma omp taskloop simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_collapse() { int i; #pragma omp parallel // expected-error@+1 {{expected '('}} #pragma omp taskloop simd collapse for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp taskloop simd collapse( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp taskloop simd collapse() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp taskloop simd collapse(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp taskloop simd collapse(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+2 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp taskloop simd collapse 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp taskloop simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp taskloop simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}} 
#pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp taskloop simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}} #pragma omp parallel // expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp taskloop simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp taskloop simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp taskloop simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}} #pragma omp parallel #pragma omp taskloop simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp taskloop simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}} #pragma omp parallel // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp taskloop simd collapse(2.5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp taskloop simd 
collapse(foo()) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp taskloop simd collapse(-5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp taskloop simd collapse(0) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp taskloop simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; } void test_private() { int i; #pragma omp parallel // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp taskloop simd private( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp taskloop simd private(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp taskloop simd private(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp taskloop simd private() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp taskloop simd private(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp taskloop simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp taskloop simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp taskloop simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp taskloop simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_lastprivate() { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // 
expected-error@+1 {{expected expression}} #pragma omp taskloop simd lastprivate( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp taskloop simd lastprivate(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp taskloop simd lastprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp taskloop simd lastprivate() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp taskloop simd lastprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp taskloop simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp taskloop simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp taskloop simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp taskloop simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_firstprivate() { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp taskloop simd firstprivate( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp taskloop simd firstprivate(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp taskloop simd firstprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp taskloop simd firstprivate() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp taskloop simd firstprivate(int) for (i 
= 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp taskloop simd firstprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp taskloop simd lastprivate(x) firstprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp taskloop simd lastprivate(x, y) firstprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp taskloop simd lastprivate(x, y, z) firstprivate(x, y, z) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp taskloop simd simdlen(64) safelen(8) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp taskloop simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp taskloop simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } // expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}} #pragma omp taskloop simd for (__int128 ii = 0; ii < 10; ii++) { c[ii] = a[ii] + b[ii]; } }
ssh_ng_fmt_plug.c
/* Fast cracker for SSH RSA / DSA key files. Hacked together during October * of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>. * * Support for cracking new openssh key format (bcrypt pbkdf) was added by * m3g9tr0n (Spiros Fraganastasis) and Dhiru Kholia in September of 2014. This * is dedicated to Raquel :-) * * Ideas borrowed from SSH2 protocol library, http://pypi.python.org/pypi/ssh * Copyright (C) 2011 Jeff Forcier <jeff@bitprophet.org> * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, * are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_sshng; #elif FMT_REGISTERS_H john_register_one(&fmt_sshng); #else #include <string.h> #include <stdint.h> #include <openssl/des.h> #include <assert.h> #include <ctype.h> #include <errno.h> #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 16 // adjust this dynamically based on the hash type? 
#endif #endif #include "arch.h" #include "aes.h" #include "jumbo.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "md5.h" #include "bcrypt_pbkdf.h" #include "memdbg.h" #include "asn1.h" #define FORMAT_LABEL "SSH-ng" #define FORMAT_NAME "" #define FORMAT_TAG "$sshng$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "RSA/DSA/EC/OPENSSH (SSH private keys) 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1001 #define PLAINTEXT_LENGTH 32 // XXX #define BINARY_SIZE 0 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN 1 #define SALT_ALIGN sizeof(int) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 // openssl asn1parse -in test_dsa.key; openssl asn1parse -in test_rsa.key #define SAFETY_FACTOR 16 // enough to verify the initial ASN.1 structure (SEQUENCE, INTEGER, Big INTEGER) of RSA, and DSA keys? #define N 8192 static struct fmt_tests sshng_tests[] = { {"$sshng$1$16$570F498F6FF732775EE38648130F600D$1200$1777f12047d4ebab06d052d52946e5e0e73b41d5077b20e1ffe1c97ef9459b8c6844fecc24fdf63314c8889398fa140026339c85336278600e299c0f4c236648ca684f0c122e66d3e860e19eab8b46a564eb101def1c6a38f2f1800040c6b59a66e7b86e145e180f8a126e46544be1e17dd32e4e72f735c9e6b0ca4bbbb32ccf34ba0a7827858b0be32f9e53f13466e2ac78c3fecdf2a51cd7871286a3a91f9c71ae9e857a74bcc06071af6f60d827f7e13ccf6c1be722246c0796f509744c2b1b1452315ea6f86a1c8765d1f0c1d795349b4ea1ba229318b392fe505292cd0c6b4e3e9b2acc13b96943d92fa5635e05b7795989906274b0fb1894102d07facdd8f2122299960e1490823d62bbd5bf6d6c92ed26e68cc2edc93fbffec557a5d187fffe085ded9408ac63293851a684ca10d6e9a4ee9b5c552c827caee1f1c41870fe2d0e79bc4a0b85478fa82a58f947d345122c8ac7c80ba2ae8452b093dda70e2a4329fce70af9cf98e19477a622083664d1e62393a01b20371fc5be9390059f1c4af75d5448a2fbe1aaa46701c696afec927c67d15c046036531d9252faa08bbf9ea0e019ea574e6af94edd7ec17c83c0f87e34c7456e19bc53b2de04dafa83267694c1f61d038e0fc5f8f1b8ce573da470e6db6d38c0e8f7141ad9e9609ea408e3823271e98
7766039d484bc88f23f2f2a1175636ece950c7d82f43726287fef37da945ec6ad6adc04cb59f66087f68a3e84e8cc39c578bcbce3aaf67f1325d3d20dbd5872cc88ab72fc0bda05bf969eca08f8cafb306424a1597ba5d612e155b4723c2c1bee9a8e3d195be3b798ea417008a2340a919e23ac899ea4dbc4ef05af2cf6b12293eeb293584b37d3f8465e36a62d65b21f68725603e11dc14acf4e3855e25980387a34a34919fdd49844ed888e37199bb26df1bbbc303e895615fcbb0aa9ddc8a2aa685da942a1e68dc3a355d27236f74220d404d25e0ac64ae9203bb04296b4d67481a4f516fd22e47092073c9c44fa098670d736c5c509e55d6b40d3bf346ea5bb0007e32e9d8290c2633621fd84c2f5f428a5649ff3a16d00fec21381543202f2ee12078ddea8a371935f2ffa15aafa644e111a29c1c4703bf8e9cf1397356e296c5484558b96639b9cf3703aabff0cf42864dab91b1e09c6439159bc95374da7a5d416402286390e76cb766cd94e7a002596e8862b8d7e46c1fc6f7bdd0b93c73d2dc3cf58ea31bc549086209f450bb7460d5e9ba0d0f7b80337651f45bf83bef1783c3a15631c82428bfe167dc0692402d7f15144fff01ad8596970439ce8a2df0107c85a23ef93edd19f62de499ab58ada581886494c3e52dd5ec53c191f6d62729729a252c2c0d8024950d1637cfd7c61a4fe64ce41cde76fe00fa2607af66a44d3b4b8836820f40c03669f08b4e986f4d03c09e3c026a910f83be623d7f68ff80d81662f020f433f7a896e10134a278cd9a8517d3bcd77c5287f7d41bc52d2f8db79b5f8f9ed6d6f45a482b13cb91ecdef43ebe38f5ad71836185ae6faf1dd11c50cc1759e4834fcab2b3523d4224a32d2eaba224a2c950dac7524afc74f02f17b511f3b22d577a6928d40909bed64f6ed27096dff591a8fbee3f32733fd2b36c0c4708a5224f165af000d93832e211ae52465f680e7a4fd66bb5eb210c4402eb58f6ebfde", "strongpassword"}, 
{"$sshng$0$8$DAA422E8A5A8EFB7$608$fa7b2c1c699697dd487261a213a0dd088a86bc03f4e2db8b87ad302e3581bdd8ed17d0a3ced3e7179ef17beea9064ee862017f472de293d655f6b1cd7115e27c328cf5caf1b5896952590cd82d123fcf6c5da3b43f5435c829ebb595300c828e04d57c7ade57efe006305b32fe79afd0d14cadba681b4dc3a69b25a1e71ddbd353465217c311d11721f1cba05d1226ff0e7d261156f0837753bcaaddfec383591f61470a4318cf679046d43490a1eef33014a90865917ccaa16f986724b8ee421d990327a46410362b4992406af41a88e3c5e5bbb7707ba08517e7ac8295ad0b934c38968f05fd372f1ee29e24eddcbbacba5b3e1b7150e51ba4e17b4f54319630e2d5372adc46e4de437f64b3d11670eb25fc94c7e9bd0579806bbf16c6cfe529a4bc0d3918ca4777f8418e789163660d9bbe0aa297857ee4922dffe310e6967fba2ee2e06707d9bbd9c8601bad7ccfdcb8a948074de511be7d588b7b71d4b5f0b1e19020b54efc4d626b2e4d85c0a40682517128b9ecc29f882996f4f6b655bb1986e293cb5271fe98c61d8b2e6e8338fee42f22674fc8b2da475663ba19644e7de76927cd9e333b533ad7617cc7a9f19dc7c00c240ed92c2fb1aaf6495bd16ab9fae4650567ad8b175d02f9e6a9737362168035670017fd9ad87cf4e916f47baa5efe0d04939295fba608f83fa811b946d12afe77836dc6d0d398824a355926ce5848dace776c7a7ab7109be495894bc98a2cf04107368d5d8777a1d0ef19782ebb1527b564ac0f5d4ac91e81f435cc21f5905b9753ee1a79913306957589943da161a6f5dc3082b80930553769ce11d82d9cb12d8a12bb4e56eb3f1200eb", "television"}, 
{"$sshng$1$16$A0B8FCAB2B655BA3D04B2020B89A142E$1200$a4dbb4c526a6bea3aeca26e6d89f0c19ebdfb9154ce4cdb4bbfc3420ffce58dd6ae1077dba9753a001b22b07e4248bb2d4a3c4bf2cbae43f8c29f55c3c36c656aa7262fd2f429a8f7fbc443c175687b20c78ed3e409a03fb7b0afa22ef2fad63d580ce00e31948171787a8861e34d4c966d0c54d299585df757a76a278c28e899b7b16fe74d38ad9f6220a2ebbd2b427a3f29436feb2e000a3f1b26a3906eb84314a8f8dc211aeab0e5d5c776b54a59c3630a96de506fdfcf7a6991bae4e90ef2f6f99d5a92c78eddc1f7bd75a94bc472c32ef82b56261889a60fbaeee0b145c4aa785bff8b854b8c61fde3f018e10e9b4de6fbf5aa7ff30d985a8b8da1e55459855cd96076d0de5ff31a593ca7ff4badb07886808c624ceaf955569138c57fd9006877d8a174bce3347b72490d5181d83a20500dc49e8160d075659b568820ac2788a50aba0488a598c6d90821026c9a6213f279b8773eb3c5b60a73e48199ed7cba66595e7f219c4d0f5e231219619ffbd3d7bd1dad4ada8bf8d9ddbd5319ff47922e6858946778daf0e6b47973db77f56dcc356691ccc652ccd53d9f9895c896d99cf0c498e5a8d712f2e8a159a80e8a3e68b812650f0ddb0e1300438b914f4c28d232c443768bccaeb204212494782003343a5cf6d455b95efc94c8d95544db32c0539d0e1fc0288b5ecfcbc4bb7b6278a54093a56ec0ad5928c113aa96a114d7fd3aec173759f5c081f1d0a2f0922433ff17911901c0f0f940b1f345d161d91ecd4456e9b8458a14e0fcbaf2b750201c10cff3c8f387004b99be515f45c00200efea4e36d83524a760c20518d902e38d6121bef29b479edbf44be4c51730c3bbc86dd6abc40b67470e12b8235cb1317b6dae34d99248f3a8f98a77d848360c01a645f76c3abc3f66af0d1f0f7bbb77930b3f85430062fb1a82c5aff1350bdba049a8bc7bcc33e61fd3e8484b9e6d51ea121337b7553284cd1222a2469e1c7158f13ff63307530243af25b4b36d19ba0604212ebcb42b450c475e238c2b9f021088b16aacfb6e564eef86860fd077f90de471fc26621360609e526444e7556bb8d6de703271a4ba8dec254305cd1163f90a32d8966f599903de0e4b62e3a8db15753fb099d164d9bd44c05f163fd96ef73382c779214c8ec93498f2f5fa31a74ad6ac3136a37c6f6c27b1dd7b93c1e292f2ef0d569581f45cb0747ee5a2fcba5781cdc96b9b2f07bdbaf7ff4e0432873072112fd17792c91548393cd58a7eb8b126f17ee107f9670567c0ab6e6b9a2997054d968feb29f479fb8b7888138971a14228bad1854d9804f1bea77014b7f0d1037444178d66d2db19b660cf5e84726b2f730662a1df93abc54ae52
1d3d1691fb4fa48b087ead9dfccf4e6367d9a25f48a019a6affbec84c20ae7b10c2a169cfa07a4d26c5035c02d3b7d01681bf56bf568ab1f740c86ee6f43b8b440eea1f1139a89fa5bc653164426856e3a5e22ff5fed05ba7a054f6d4609eb142ef113a24f05b92ba72c40cd9bde09d8125d462fd30bab15cb47130fa30730b26c0d399d14b9cb42ec56df024bb9bbcd18ab4d279ccf82b2c1fee8fdbade8bd506791a6fd51349b24cdc36ec4d88e6dd43a85b92a71458908271d298681f54aa567262fc70260cc15d7f5559abd7e7ee4d2c7c727bf5036c469b690ece969480240c", "Olympics"}, {"$sshng$1$16$ABF86CF7849BBC5C661A69F1F7B4C87A$1200$4941cb1e3d19fd7173e7b97faf6484d803fe6737628241e7e848b4d02ef63c91a99aba940c116157f96e7c8e91550df51df80324a5630244ae83fcc04304ca9f7a4d9621cab45a6d513afc06b2364c222a7907729e3562f676fb94d7a3cfb2551de2766e9d67c035fecde455fd741a67521d0f71673d7c364ac314850314b31b6899198544c0f2ab788ebd435cd291ae8c12576837f784ab7cd8a9bc24bea3823034154df1b3d9b6a064131a9eb81b3fd298f128458cfce450305602e087ea9957e29117942ee7a2fd8a980da55c364f781feeb1bf355ee0de54ce946d85930f786d6f6175254d5a4370ddc5c88ef13e23429b0d9d01f413f08ce5253141d84c8368308890c69b65be93a415923f98bc898f9cb4abafcbcddf2785f698e091d75eea0a90216ca47d0459cb2b8d95a8989f604a0c7bc8dc690791c0c73e6f7a2628ea7ebd8e1a39ae583c91668dca7806f226ab361f07bfd35f7130aefc83071b865cc004f963ef80a750008e920f1854321949d6143ffc33b60b58015d5f32c820006b0a91aa08755fd859c845d3a75d89350d9c12e7df32b9bcd188681b0981ac4713505c4b516ee4d1073ea715b68d0c10ce3f562f0b5b5383a6bd53008ec0e8927d78d8fd21d760e67da700db638f3835cfd523046ee0f2fffed05c3bd902b66765629f428bc2808e300fbe2064af9ab125ac4195f3b5756e09059cc391127c8efba8e50eaeb7e0a4d98561ce9540fa6b9b6333beb1efed5751e7acc1aaf4f0ff975e548a21b08a2ab49d4e6bf2336e60eb8684debe5d611769cee17c38e02d2057284d7948fdbe459092a0e4471107f55562aceb1045f0f1cefb3d89e549422f27618137c48dce1f149f6c8748d4a1eff89eed9140ec598def8d38457a239ee167af6d60ae995261d9cb47ce2d4d25b1520f8b75408b45265cf14d3892dcb53732fa4151312f4f6c8d46a54d07c23b4b253003489a28d544fa903eb0a72a3ae914dafed5218ce8d745b23bde33c9e346db79051e763866fba38f123b32c110b4168c
3baf2ace735d0fcf5ccf7c2a29d67d4831c0cf3472ab8b197ed953056c42d7cc91646ca12a7bebb23fa4fb063217b7b7c9fec7688788798424acc32b3c704a91bee6a63ca5a2186df80e225f96679568c936c9a47b5615858211c72441a9ff4dc265ba98f346984bf92969af9bd035f93a47ddf8beef9ba84eacc1f76ee4bd1eb242dc9fb2949d287f685369d1122865926270f8bc83d7118801e77e48fd2dd4b996231564d1649c4636b734e483067c1181d1edc6dd424f517cd3ea3fe1ab904cda78b7b7d6c856a82c7e1c6ba3e9fb93da1dfeaf4e3eff86b4541ab38f526f509b915f787d6abd4a4c7174dfcb18f36ba72fa69b61a060b2785b3d3e8d28e9f6aa1a32aca3948ee48188a7ee24b160f3a6bc98297bd852d0759080cecd85dbc91bf4404705948c6a169e140a2479cdf5b840c3d6f99ea4e09b76730b4d33300f6a963c90cb0e07833a4bf314d72d81ae8ed5cf5ca4bcb6f35acb0c7d8298b70a5b61f87b13c3b1d02b56fe42c5465ad57dd4041b9b36943188acb8742052669b95fd98f3d18351f8748e9eb0f47d11a4d6ca2ec0348ef7d24e9f80c1dc239b513ed7867f25903875a1e9a983c5c8475b8de1f7f70423f1f472fca1e99a52b14105c4a47edb657eb87d0353", "extuitive"}, {"$sshng$1$16$925FA0A2EF7283A2F69C6CE69121D43C$1200$0498402851fd405114a860a1fdc760752bc8b7f44c77b2ef6a6d46ed3cee48d963bf34b905124c18823bc69819bbec29edebf4e697afffec2c35e79b993ff28b92d0355758b9c4ea00fb1f4bd48732059643ca2144b9c35de734d8db395076cb7c0468f6cfbabb1646345f907af82bf1598733d7aaa5496c55e662075d6bdb47cb941160fd1106570303d009bdc89fa3ecc07c84c3f91238a51db8ecc09f8e6b6c1395ce57970cbf2a3ef1341ddcb404e95832f0535a30b17048554b3341502619c48685db4706855ce62a86b3953f1219d4dae10243265d01264fa6408006188a40683e5de4952cb6796cd2593e9365065f51ff21b23b8bc075445226092b988114962ed5f4b97128cc69eca7a3d1169d2d83a632a5cc51290527bc848c7dd3d76554b28bb2bea0626f4fd27f3b9610e827e8211c60879d77ea1593d80908618b55081048bc2baef6848c410372b9a69358feb95c23d747f81b59577c601d55337b7c737d77bd742a115681a778c3d8e513a3ccd25cf833a32c73bf04476131b2bb498fac9496597163766b5f466b2478a564736c245cf0a0bf4b33be13eb2360dacbf8573b342f336d0341229654cd140674b18e35c04f917a9668306b4c93285825bdc8494c209d103212ea1deac7839db28acfb50fabc5c2b5057333ecbcb685adef5e962a526a02fd44f40a5af9c27d4211af129ad47b5
fbc1d5f9f01e5ad1c53f728ead66a45cb8e6a9c1237aeb02374225ef2b63bc3ea6b2b1ab6136f90236ed5de5f88c6edde8ea75db8cf9aed8030537731dfe3ee855ab501f0235aeb05c8b2e3f4668ca0ab230cc8764863bf3ea71bbce2763556a14cdc5e09b0fa8e9ce6948d377b087fe04d1a5ae2ca61350514376cf447119fad0ea158b16b86be8f43742fb9934d3c1e8cc46497c191d1703a85e0b8b102b27595471687c5d1335a2290214fd46d9568d4b2845b88f116d5c2b3e3766030beb3d71157ff0c4fabd13aa173795db5b88d059ec79bf50c22f3119411b4279d1c7c0e88a7b01fa47e52553913b0ceee272500fedfa28483a849c186ce31b2134945dcaa84c13f7e474d59b0a0f5f768a8ec4cd58c8499b3ba3e1880fa7764ea9e424b29e5f6ea93671bce2985ea0d51efc2774f023c65e99be3db57c4a83e3c2f59fee62f60fa8c7eb66ff870f05cffd7ea208520a0640fe86f619944b389cfe695567ebc82829407273ac67130d3b09c8ff172a77a8ef56b18aac65d5607ef9f6ee791c0ec5b6447bd847b5d6a5411142a110700d5bb04424111ddfee27139ebad931da60de1e8bfc87f2b53b8720435d3dbb358445fc3493ada42192783741f72b5934d6a399e1ea16291fad9f38e49f23e3ad7303d4d1e5677b9a81aff8dfca7abb33455e4e7858a9de656e4239c22ac2e91b558bcc25b356be583487ffc24459873febd2becae6056544d56fe670342347048a8abca019d2203794fd8652d31899f094d67aa304d1e607460efbdf05b3b407de54fc9e33d1879fe577091036b77e43e382f1acbbc12cb3bc83f25a4791265741e018b4825beb0a6901db19ee58a3c378df4ffeb4c9def7e730a08546d3f698f5ca4f98c81deb2982729ab95167ecaa1d6320b12d48f4de2fc9891b8e117c88a6f5bff046b1ea8cab4b0af8a488dfa6353ccaa3125e959322bd0ad4662ad15cffb86f3", "C0Ld.FUS10N"}, /* DSA test vectors */ 
{"$sshng$0$8$78DAEB836ED0A646$448$95d5a4abd38c957a969a322aa6936798d3c8523e6e553d762e4068b130294db89b4e67b790825bd6e0de1b60528557d8faf0ce4d413d92818f0cbb315b5b7902df845722032bc6883b4b87b5e5cce406c15f6d0b2d45916d156a661b0cc6a421dc7dd794788df9085a59c6f87c5baed7c6bc4a48a64c5a439d9b9f7e808397fce1fc1ed789e0114cb03cd392bf660541041c1f964476044d39dd71eb240231f4111494b3fbe85a35f2bbe32d93927aedecf959e786a51be450ade61e746b8eae6174016e8dabf59a358a518c3445c93b4824e61c065664f24b3e773643c0e47996b7c348cefe63407303cbb37e672905bb0a4fd51e4cfd920563863987f96f9fa2098d0ed5c9244f21ba4df28d9826fd8e0f525af349f7b54f0c83bee8de8e1d3702a6edc0a396af85b8805d3ac4a0b01f053d0454856fa3a450f199637ae0333670483a454769b5bcbb5a6329d07c0ad6ac847f11e32ccb835650fb9404880c1ad19548cfb57107d43cc8610b9869165a8b116867b118f97ef74f09ab285114512f599d066d46dae846a1b04787f3e30410b234e5fc098e8a39419a2dbdb5a25c709b13fd31eb2d0e6994e11df0e32ff45b1a3c95c153ce606912a8dc966daf", "television"}, #ifdef DEBUG /* this key is SUPER slow, now that OMP_SCALE has been increased. 
*/ /* it would be nice to get one of these with rounds set to 2, */ /* instead of the rounds=64 of this hash (pass_gen.pl update) */ /* new ssh key format */ {"$sshng$2$16$cc2c3c68c39e0ba6289ed36cb92c3a73$1334$6f70656e7373682d6b65792d7631000000000a6165733235362d636263000000066263727970740000001800000010cc2c3c68c39e0ba6289ed36cb92c3a73000000400000000100000117000000077373682d727361000000030100010000010100af9bf6a900464f154916fac3d80476e0ee739ff7f25a96b562ff9f4262db1972992947dfa89da47f9fa5f4d9e54a2d103ce63779746888c298693663310f054af1c1dc90f62b22f630703726631c03ff217c29a32fd9f9bc178aabe9666c37c2c2bf4a2b4c528efe51e755053216d41e860ef996b549184cd15bd17641128690d2946a76261954edfee942bbefbb182df320d3da7f46a5fcddc15b5ecbf9b1b822cbc9ef978e8b639e8eab2e3b1229d429da4f6bdc27af2f2aab0e187a6cce91b95a8ac6f5602773d0014f1e8124a89e43e502bebb4d21f6a148e208e2d591391d1aede6a0a6d499a3de9996474310dd9d3233e3f05e9d0e85aba44715e838bd000003d08168da8d056f904faf9d80b22c08141e8b068a3af64ace3b5ffbad24b884cd37ae7ad89546031ab834d612b44266b95263a5c38f0d628d704caf70944629ad66d3cef974ec4faaaeb7d7df67f1321bb606ec6e14060c0de1a63a5732ca89b94ae765cb0671a4a1a76b42c06c220546bbf0f8a88471c0bf4200a0cbe0d346be67f688dcf76a3666f7c4447b3ced2d0c9a2fa50abc6ca222ddd70aeb82d65f8fefa313b3db76c5a03478bebc9e0942e17c07ae11d1fbe1b0b380ca2506a26aaf5cdb8668af186d1bc293844bd9c2cc8bb40530387f9a5e11770484593af69384fc003beb82beffa00c1b23f7d6a9bd8f6153cb7abd9531008df384a3455d7cdd7020df4dc507f34e697ad437f01989271b17b93045265f20e6fd02f63ac1e13ec85f8224bc60dd91e15dcfa2ec4f6986e3b37ea6bd571ca18089402f80c121323eb774708cc6ab470e05a53428b65dede47ded97c4f5941be44f6290d5ccdd9bea95b06190efee6c64d874798b6045c5d553a1f68c95f143d0a6893877796fff452851d64ac73c007b91dd6058a5c31165003d9d66b4a1a40c2f82e5c3be6820b109addc0f088c84576e30c7202da3304636de4035f3ca8b032885aa2bedb4d1e134c1615139fb6ed7fa924c2e8abdfcd75da029e910ee8a9d4af594e2a9732115237b6ba3c24f8dfd4bed0a7cb4d96e114bff30e9c68226ae04de6fee2340b41c49cd08982a3f21169853366882a4af43e256cb0d09c88856c4
6f2ad8a7bcc3896efe5f4f104ef9b595cd08b4b76d6ac074f4fa4a488f508c6106603cb4ca65af819d2222a086ddd16a63021627f337ab9d86b33150808313bfe7368737bf38e7dee410cf08f2effef780d161e2cb734135bba36fe2ee3319cda95242b89b50673c88eb3dfa331e987e3fbde92cec7e019990d97b11c71d5b04b8ec451549abc9ed195a080aefb1d77eff476f9de4315fca5bf6386438869a8d59a5f0badda70b337bb9bdcff966229d631286d3c5b97c41f3ef5daa6ef4416577815214733e8602ef7f8abc3a19ee58f48b10c8ab1d5c76f01febdb29b36910d615d4022849ec117f02b6ae898cc0ff67e61df43284d3ff739ab4c34fe2854797ae0b66e0ba234e236daba6eb9172e9e1f4a0f5283ae9b336059d2ab2c7145e0a4de4b5bed3baf87c90ad4d47b94eb1c01b07510191f06b9eaf014e225b2bce46d5a7080c6d1daf64460836d7630c157e44afc9483a777d76fcafbfc2c4f299211c0465f0151f13707f815700944ad6a17e23e63dd0eecb5cdb5284ad92dd853e0ce136bc77633fef514e6aadeb61e7fe885fe399076cbd5464a6d17efa1e116853e80cf08adea7e550b0d27e6a96d835069674fd7bcc$64$358", "12345"}, #endif // EC private key {"$sshng$3$16$00B535FBA963402F20C12648A59D7258$128$dfa09369ff38f33c9789d33760d16fdd47730311b41b51a0c7b1dd1dec850c5c2ff523710af12839f25a709f0076cdd3e3643fab2ea1d17c6fae52a797b55e752b71a1fdd46d5bd889b51ddc2a01922340e5be914a67dabf666aff1c88275bd8ec3529e26386279adeb480446ab869dc27c160bd8fe469d5f993b90aaffef8ce", "password123"}, // RSA key encrypted with 3DES, this caught the incorrect padding check bug 
{"$sshng$0$8$F1621D1A561534C3$616$ab1925ec002675445db989f2591a5bf7a31a80e10131b6eebb20bc2d2b70e2a21f431bfc70228f3873b4e0bb902156a1cf829d50fa09bc035d5ddf04f2a403f4fd7bfe32b5219d6c74dd594d0babd07e28075be4eef6f015d1ce5be91fcd81a55f886d867995d4719bd8e0890e8fe4c8abc171d272442e1c6805b29e1cb996a2b2cd3e82e70df0270d98d88c8cd32a1164ebe6e1390e64ce15cc166054281619a125bf4776c7433cf653a87d40d3ae6b494d536c2d2974e697d34b8965239d976e9e1d8a3f1503c7bb6ebacd8f852f65b96e58e5a280411ea7737ba1410ec273722b1b3b91c83eba4c3a0c187be3bdb05d3fe9be55cfbde501adc8ff6ff257ecbd4efceb8d8e7a859af411565b3f3fb0fc3d9df056a265836ec18b234f7b6956a4202ae75e5ed2890d33e9abb355763cc56438509a199c4fe3e48e12fa3f6cc2e55f8f3b134ba2dec87b4d37d6209bbf84826d74cac0d96cf4303654c36476edc38f750d4d7d0a495aac5f6ec8ffc6fcceb482985b81636fb66f05502d00c00e5e8b39a17afe46faf18ac590cb4fd59cf88b62209378c47be74b902956b555bdeaba14f447a8b0e4522ea6d0f492045f3b14a49c3d7d9f6cd3f8782cb1fce3bacd57e71e918726a514a39a474661c6989796a9fab1d8f6cc684b4963ced9982a01ee50e076937dfccc4a1d00870b238f30fc4fa258dd6a62d3c7a79bb9f23b0be25261bf222681859058fc56660d59124d114d7528e98b8c2eb8d465514894a6796b07f244bb8334bb4a440245d5a942a05fd401634cbc6f32ee223b4ec49446fd0fc2b30ed05324837ba8a2415c23bc4fc526ee15766c6a29047ba5bb05f38a122160ed91c769ae", "albert"}, // /ssh-keygen -o -N test12345 -t ecdsa -f test 
{"$sshng$2$16$6931efeeafd9d3fefc5d3f220d6e32f3$375$6f70656e7373682d6b65792d7631000000000a6165733235362d6362630000000662637279707400000018000000106931efeeafd9d3fefc5d3f220d6e32f30000001000000001000000680000001365636473612d736861322d6e69737470323536000000086e6973747032353600000041043da6ae45fd7e65967e3434e5af68d1f92c08b2dbc837ba50f14f58c3fe9f715062f61d3485d0426dec2b021b69f4a8272bdeaf90d9be5b3bd101f2381e9a1758000000c0d876c4b88fc4b76a43b95813d68e37000e6bea260da8cde01144a8ea052e66e5e42bb488b1c39822541147bc21a16cc6be613fa76d6e524073a68e94d944723abb34cec635dc4e3ffa0411695452467c294b95c78f34466c2154bb97f54d5712b7cc08d2902a0f874543eb6660c4c4adccbf1528cfb5348451d93a70d8318a3716819a624299aa5e9c21ec6526377c7bbc3f30173dd9a9b3bc0ef0193a9a21210db076c93c228fd23eaa83796d4f6a4848760db010054f1b9aed7445061a3512$16$183", "test12345"}, // /ssh-keygen -o -N test12345 -t ed25519 -f test {"$sshng$2$16$a439509f8aefc40a17a504ac81c46601$290$6f70656e7373682d6b65792d7631000000000a6165733235362d636263000000066263727970740000001800000010a439509f8aefc40a17a504ac81c466010000001000000001000000330000000b7373682d65643235353139000000200b31c6439dc6b42c9de146c70c752e33877baa7a5875c37ce092e5689dadadee000000a013bbe4b8cd8e0880a7c5dba953fdc5b0e4380b1904c631cb10c9f19ddadd52341160120f459ea1325681bc8f5c40f45a5ef055bc79ea9a05bc94bf668e2808ea6cf88a5ff3f418c4b13664c02456086671776969ce9cb21699818d16b4deae2dd30f03f0f85fc8dd54901a7ad884c35a2b28bd08b418d15ee7d8ec0332649eeff4fab6299eca59f096c2b56f753de0dcc226c0d8404bf44a73a608de2589545c$16$130", "test12345"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *cracked; static struct custom_salt { unsigned char salt[16]; unsigned char ct[N]; int cipher; int ctl; int sl; int rounds; int ciphertext_begin_offset; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = 
mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	/* one crack flag per candidate key */
	cracked = mem_calloc(self->params.max_keys_per_crypt,
	                     sizeof(*cracked));
}

/* Release the per-candidate buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
}

/* Canonicalize a ciphertext: lower-case a bounded copy so differently-cased
 * hex spellings of the same hash unify (FMT_SPLIT_UNIFIES_CASE). */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char buf[sizeof(struct custom_salt)+100];

	if (strstr(ciphertext, "$SOURCE_HASH$"))
		return ciphertext;
	strnzcpy(buf, ciphertext, sizeof(buf));
	strlwr(buf);
	return buf;
}

/* Validate the "$sshng$cipher$saltlen$salt$ctlen$ct[$rounds$offset]" fields.
 * Returns 1 if the hash is well-formed, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	int len, cipher, extra;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;

	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "$")) == NULL)	/* cipher */
		goto err;
	if (!isdec(p))
		goto err;
	cipher = atoi(p);
	if ((p = strtokm(NULL, "$")) == NULL)	/* salt len */
		goto err;
	if (!isdec(p))
		goto err;
	len = atoi(p);
	if (len > 16)	/* salt buffer in custom_salt is 16 bytes */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	/* salt */
		goto err;
	if (hexlen(p, &extra) != len * 2 || extra)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	/* ciphertext length */
		goto err;
	if (!isdec(p))
		goto err;
	len = atoi(p);
	/* NOTE(review): len is not checked against N (the ct[] capacity declared
	 * above) — confirm oversized ciphertexts cannot overflow cs.ct in
	 * get_salt(). */
	if ((p = strtokm(NULL, "$")) == NULL)	/* ciphertext */
		goto err;
	if (hexlen(p, &extra) / 2 != len || extra)
		goto err;
	if (cipher == 2) {	/* new openssh-key-v1 format carries two extra fields */
		if ((p = strtokm(NULL, "$")) == NULL)	/* rounds */
			goto err;
		if (!isdec(p))
			goto err;
		if ((p = strtokm(NULL, "$")) == NULL)	/* ciphertext_begin_offset */
			goto err;
		if (!isdec(p))
			goto err;
		if (atoi(p) + 16 > len)	/* one AES block must fit past the offset */
			goto err;
	}
	if (cipher != 0 && cipher != 1 && cipher != 2 && cipher != 3) {
		fprintf(stderr, "[ssh-ng] cipher value of %d is not supported!\n",
		        cipher);
		goto err;
	}

	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

/* Parse a valid ciphertext into a static custom_salt (cipher id, salt,
 * encrypted blob, and — for cipher 2 — bcrypt rounds and payload offset). */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(struct custom_salt));
	cs.rounds = 1;	/* default for the MD5-based KDFs */
	ctcopy += FORMAT_TAG_LEN;	/* skip over "$sshng$" */
	p
= strtokm(ctcopy, "$");
	cs.cipher = atoi(p);
	p = strtokm(NULL, "$");
	cs.sl = atoi(p);
	p = strtokm(NULL, "$");
	/* hex-decode the salt */
	for (i = 0; i < cs.sl; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "$");
	cs.ctl = atoi(p);
	p = strtokm(NULL, "$");
	/* hex-decode the encrypted key blob */
	for (i = 0; i < cs.ctl; i++)
		cs.ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	if (cs.cipher == 2) {	/* openssh-key-v1: bcrypt rounds + payload offset */
		p = strtokm(NULL, "$");
		cs.rounds = atoi(p);
		p = strtokm(NULL, "$");
		cs.ciphertext_begin_offset = atoi(p);
	}
	MEM_FREE(keeptr);
	return (void *)&cs;
}

/* Install the salt selected by the cracking loop. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

#if 0
/* Generic OpenSSL EVP_BytesToKey-style MD5 KDF (unused; the fixed-size
 * 16/24-byte variants below are used instead). */
static void generate_key_bytes(int nbytes, unsigned char *password, unsigned char *key)
{
	unsigned char digest[16] = {0};
	int keyidx = 0;
	int digest_inited = 0;
	int size = 0;
	int i = 0;

	while (nbytes > 0) {
		MD5_CTX ctx;
		MD5_Init(&ctx);
		if (digest_inited) {
			MD5_Update(&ctx, digest, 16);
		}
		MD5_Update(&ctx, password, strlen((const char*)password));
		/* use first 8 bytes of salt */
		MD5_Update(&ctx, cur_salt->salt, 8);
		MD5_Final(digest, &ctx);
		digest_inited = 1;
		if (nbytes > 16)
			size = 16;
		else
			size = nbytes;
		/* copy part of digest to keydata */
		for (i = 0; i < size; i++)
			key[keyidx++] = digest[i];
		nbytes -= size;
	}
}
#endif

/* 16-byte key = MD5(password || salt[0..7]) — one KDF round. */
inline static void generate16key_bytes(unsigned char *password,
                                       unsigned char *key)
{
	MD5_CTX ctx;

	MD5_Init(&ctx);
	MD5_Update(&ctx, password, strlen((const char*)password));
	/* use first 8 bytes of salt */
	MD5_Update(&ctx, cur_salt->salt, 8);
	/* digest is keydata */
	MD5_Final(key, &ctx);
}

/* 24-byte (3DES) key: first 16 bytes as above, then 8 more bytes from
 * MD5(key16 || password || salt[0..7]). */
inline static void generate24key_bytes(unsigned char *password,
                                       unsigned char *key)
{
	unsigned char digest[16];
	int len = strlen((const char*)password);
	MD5_CTX ctx;

	MD5_Init(&ctx);
	MD5_Update(&ctx, password, len);
	/* use first 8 bytes of salt */
	MD5_Update(&ctx, cur_salt->salt, 8);
	/* digest is keydata */
	MD5_Final(key, &ctx);

	MD5_Init(&ctx);
	MD5_Update(&ctx, key, 16);
	MD5_Update(&ctx, password, len);
	/* use first 8 bytes of salt */
	MD5_Update(&ctx, cur_salt->salt, 8);
	MD5_Final(digest, &ctx);
	/* 8 more bytes of keydata */
	memcpy(&key[16], digest, 8);
}

/* Sanity-check a decrypted EC (SEC1) private key: PKCS#7 padding plus the
 * leading BER structure.  Returns 0 on plausible plaintext, -1 otherwise. */
inline static int check_padding_and_structure_EC(unsigned char *out, int length, int strict_mode)
{
	struct asn1_hdr hdr;
	const uint8_t *pos, *end;

	// First check padding
	if (check_pkcs_pad(out, length, 16) < 0)
		return -1;

	/* check BER decoding, EC private key file contains:
	 *
	 * SEQUENCE, INTEGER (length 1), OCTET STRING, cont, OBJECT, cont, BIT STRING
	 *
	 * $ ssh-keygen -t ecdsa -f unencrypted_ecdsa_sample.key # don't use a password for testing
	 * $ openssl asn1parse -in unencrypted_ecdsa_sample.key # see the underlying structure
	 */

	// SEQUENCE
	if (asn1_get_next(out, length, &hdr) < 0 ||
	    hdr.class != ASN1_CLASS_UNIVERSAL ||
	    hdr.tag != ASN1_TAG_SEQUENCE) {
		goto bad;
	}
	pos = hdr.payload;
	end = pos + hdr.length;

	// version Version (Version ::= INTEGER)
	if (asn1_get_next(pos, end - pos, &hdr) < 0 ||
	    hdr.class != ASN1_CLASS_UNIVERSAL ||
	    hdr.tag != ASN1_TAG_INTEGER) {
		goto bad;
	}
	pos = hdr.payload + hdr.length;
	if (hdr.length != 1)
		goto bad;

	// OCTET STRING
	if (asn1_get_next(pos, end - pos, &hdr) < 0 ||
	    hdr.class != ASN1_CLASS_UNIVERSAL ||
	    hdr.tag != ASN1_TAG_OCTETSTRING) {
		goto bad;
	}
	pos = hdr.payload + hdr.length;
	if (hdr.length < 8) // "secp112r1" curve uses 112 bit prime field, rest are bigger
		goto bad;

	// XXX add more structure checks!

	return 0;

bad:
	return -1;
}

/* Sanity-check a decrypted RSA/DSA private key: PKCS padding for the given
 * cipher blocksize plus the leading BER structure (strict_mode walks two
 * further INTEGERs for full decrypts).  Returns 0 on plausible plaintext. */
inline static int check_padding_and_structure(unsigned char *out, int length, int strict_mode, int blocksize)
{
	struct asn1_hdr hdr;
	const uint8_t *pos, *end;

	// First check padding
	if (check_pkcs_pad(out, length, blocksize) < 0)
		return -1;

	/* check BER decoding, private key file contains:
	 *
	 * RSAPrivateKey = { version = 0, n, e, d, p, q, d mod p-1, d mod q-1, q**-1 mod p }
	 * DSAPrivateKey = { version = 0, p, q, g, y, x }
	 *
	 * openssl asn1parse -in test_rsa.key # this shows the structure nicely!
	 */

	// SEQUENCE
	if (asn1_get_next(out, length, &hdr) < 0 ||
	    hdr.class != ASN1_CLASS_UNIVERSAL ||
	    hdr.tag != ASN1_TAG_SEQUENCE) {
		goto bad;
	}
	pos = hdr.payload;
	end = pos + hdr.length;

	// version Version (Version ::= INTEGER)
	if (asn1_get_next(pos, end - pos, &hdr) < 0 ||
	    hdr.class != ASN1_CLASS_UNIVERSAL ||
	    hdr.tag != ASN1_TAG_INTEGER) {
		goto bad;
	}
	pos = hdr.payload + hdr.length;

	// INTEGER (big one)
	if (asn1_get_next(pos, end - pos, &hdr) < 0 ||
	    hdr.class != ASN1_CLASS_UNIVERSAL ||
	    hdr.tag != ASN1_TAG_INTEGER) {
		goto bad;
	}
	pos = hdr.payload + hdr.length;
	/* NOTE: now this integer has to be big, is this always true?
	 * RSA (as used in ssh) uses big prime numbers, so this check should be OK */
	if (hdr.length < 64) {
		goto bad;
	}

	if (strict_mode) {
		// INTEGER (small one)
		if (asn1_get_next(pos, end - pos, &hdr) < 0 ||
		    hdr.class != ASN1_CLASS_UNIVERSAL ||
		    hdr.tag != ASN1_TAG_INTEGER) {
			goto bad;
		}
		pos = hdr.payload + hdr.length;

		// INTEGER (big one again)
		if (asn1_get_next(pos, end - pos, &hdr) < 0 ||
		    hdr.class != ASN1_CLASS_UNIVERSAL ||
		    hdr.tag != ASN1_TAG_INTEGER) {
			goto bad;
		}
		pos = hdr.payload + hdr.length;
		if (hdr.length < 32) {
			goto bad;
		}
	}

	return 0;

bad:
	return -1;
}

/* Derive the key for cur_salt's cipher from password and decrypt into out.
 * full_decrypt == 0 decrypts only enough leading/trailing blocks for the
 * fast checks in crypt_all(); full_decrypt != 0 decrypts everything. */
static void common_crypt_code(char *password, unsigned char *out, int full_decrypt)
{
	if (cur_salt->cipher == 0) {	/* 3DES-CBC */
		unsigned char key[24] = {0};
		DES_cblock key1, key2, key3;
		DES_cblock ivec;
		DES_key_schedule ks1, ks2, ks3;

		generate24key_bytes((unsigned char*)password, key);
		memset(out, 0, SAFETY_FACTOR);
		memcpy(key1, key, 8);
		memcpy(key2, key + 8, 8);
		memcpy(key3, key + 16, 8);
		DES_set_key((DES_cblock *) key1, &ks1);
		DES_set_key((DES_cblock *) key2, &ks2);
		DES_set_key((DES_cblock *) key3, &ks3);
		memcpy(ivec, cur_salt->salt, 8);
		if (full_decrypt) {
			DES_ede3_cbc_encrypt(cur_salt->ct, out, cur_salt->ctl,
			                     &ks1, &ks2, &ks3, &ivec, DES_DECRYPT);
		} else {
			/* head for structure check, tail for padding check */
			DES_ede3_cbc_encrypt(cur_salt->ct, out, SAFETY_FACTOR,
			                     &ks1, &ks2, &ks3, &ivec, DES_DECRYPT);
			DES_ede3_cbc_encrypt(cur_salt->ct + cur_salt->ctl - 32, out
+ cur_salt->ctl - 32, 32,
			                     &ks1, &ks2, &ks3, &ivec, DES_DECRYPT);
		}
	} else if (cur_salt->cipher == 1) {	/* AES-128-CBC, MD5 KDF */
		unsigned char key[16] = {0};
		AES_KEY akey;
		unsigned char iv[16];

		memcpy(iv, cur_salt->salt, 16);
		memset(out, 0, SAFETY_FACTOR);
		memset(out + cur_salt->ctl - 32, 0, 32);
		generate16key_bytes((unsigned char*)password, key);
		AES_set_decrypt_key(key, 128, &akey);
		if (full_decrypt) {
			AES_cbc_encrypt(cur_salt->ct, out, cur_salt->ctl, &akey, iv, AES_DECRYPT);
		} else {
			AES_cbc_encrypt(cur_salt->ct, out, SAFETY_FACTOR, &akey, iv, AES_DECRYPT);
			// are starting SAFETY_FACTOR bytes enough?
			// decrypting 1 blocks (16 bytes) is enough for correct padding check
		}
		/* decrypt the final block with the previous ciphertext block as IV */
		memcpy(iv, cur_salt->ct + cur_salt->ctl - 32, 16);
		AES_cbc_encrypt(cur_salt->ct + cur_salt->ctl - 16, out + cur_salt->ctl - 16, 16, &akey, iv, AES_DECRYPT);
	} else if (cur_salt->cipher == 2) {
		/* new ssh key format handling (openssh-key-v1, AES-256 + bcrypt_pbkdf) */
		unsigned char key[32+16] = {0};
		AES_KEY akey;
		unsigned char iv[16];

		// derive (key length + iv length) bytes
		bcrypt_pbkdf(password, strlen((const char*)password),
		             cur_salt->salt, 16, key, 32 + 16, cur_salt->rounds);
		AES_set_decrypt_key(key, 256, &akey);
		memcpy(iv, key + 32, 16);
		AES_cbc_encrypt(cur_salt->ct + cur_salt->ciphertext_begin_offset, out, 16, &akey, iv, AES_DECRYPT); // decrypt 1 block for "check bytes" check
		// AES_cbc_encrypt(cur_salt->ct + cur_salt->ctl - 32, out, 32, &akey, iv, AES_DECRYPT); // decrypt 2 blocks for padding check, iv doesn't matter
	} else if (cur_salt->cipher == 3) { // EC keys with AES-128
		unsigned char key[16] = {0};
		AES_KEY akey;
		unsigned char iv[16];

		memcpy(iv, cur_salt->salt, 16);
		memset(out, 0, N);
		generate16key_bytes((unsigned char*)password, key);
		AES_set_decrypt_key(key, 128, &akey);
		AES_cbc_encrypt(cur_salt->ct, out, cur_salt->ctl, &akey, iv, AES_DECRYPT); // full decrypt
	}
}

/* Try every buffered candidate against the current salt; a fast partial
 * decrypt plus padding/structure/"check bytes" heuristics marks cracked[]. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

	/* NOTE(review): the for statement sits inside #ifdef _OPENMP, so a
	 * non-OpenMP build appears to process only index 0 — confirm against
	 * upstream; the usual idiom keeps only the pragma inside the #ifdef. */
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		unsigned char out[N];

		common_crypt_code(saved_key[index], out, 0); // don't do full decryption (except for EC keys)

		if (cur_salt->cipher == 0) { // 3DES
			if (check_padding_and_structure(out, cur_salt->ctl, 0, 8) == 0)
				cracked[index] = 1;
			else
				cracked[index] = 0;
		} else if (cur_salt->cipher == 1) {
			if (check_padding_and_structure(out, cur_salt->ctl, 0, 16) == 0)
				cracked[index] = 1;
			else
				cracked[index] = 0;
		} else if (cur_salt->cipher == 2) { // new ssh key format handling
			// if (check_padding_only(out + 16, 16) == 0 && out[31] >= 8)
			// this padding check is quite unreliable in practice!
			// all keys don't have a non-zero length padding, so we use the "check bytes" check instead
			if (memcmp(out, out + 4, 4) == 0)	/* the two 32-bit check ints must match */
				cracked[index] = 1;
			else
				cracked[index] = 0;
		} else if (cur_salt->cipher == 3) { // EC keys
			if (check_padding_and_structure_EC(out, cur_salt->ctl, 0) == 0)
				cracked[index] = 1;
			else
				cracked[index] = 0;
		}
	}

	return count;
}

/* Any hit in this batch? */
static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (cracked[index])
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

/* Confirm a candidate with a full decrypt and strict structure checks
 * (ciphers 2 and 3 accept the fast-path verdict as-is). */
static int cmp_exact(char *source, int index)
{
	unsigned char out[N];

	common_crypt_code(saved_key[index], out, 1); // do full decryption!

	if (cur_salt->cipher == 0) { // 3DES
		if (check_padding_and_structure(out, cur_salt->ctl, 1, 8) == 0)
			return 1;
	} else if (cur_salt->cipher == 1) {
		if (check_padding_and_structure(out, cur_salt->ctl, 1, 16) == 0)
			return 1;
	} else if (cur_salt->cipher == 2) {
		/* new ssh key format handling */
		return 1; // XXX add more checks!
	} else if (cur_salt->cipher == 3) { // EC keys
		return 1;
	}

	return 0;
}

/* Store a candidate password (NUL-terminated, truncated to buffer size). */
static void sshng_set_key(char *key, int index)
{
	strnzcpyn(saved_key[index], key, sizeof(*saved_key));
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Reported "kdf" tunable cost: 2 = bcrypt-pbkdf, 1 = MD5-based ssh KDF. */
static unsigned int sshng_kdf(void *salt)
{
	struct custom_salt *cur_salt = salt;

	if (cur_salt->cipher == 2)
		return 2; // bcrypt-pbkdf
	else
		return 1; // regular "ssh kdf"
}

/* Reported "iteration count" tunable cost (1 except for bcrypt-pbkdf). */
static unsigned int sshng_iteration_count(void *salt)
{
	struct custom_salt *cur_salt = salt;

	return cur_salt->rounds;
}

struct fmt_main fmt_sshng = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT |
		FMT_SPLIT_UNIFIES_CASE | FMT_HUGE_INPUT,
		{
			"kdf",
			"iteration count",
		},
		{ FORMAT_TAG },
		sshng_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		fmt_default_binary,
		get_salt,
		{
			sshng_kdf,
			sshng_iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		sshng_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
step.c
#include <mpi.h>
#include "grid.h"

/* Externally-owned grid variables (declared/allocated by the generator). */
extern struct {
	char *name;
	int loc;
	int dim;
	union {
		GVAL *restrict * restrict p2;
		GVAL *restrict * restrict * restrict p3;
	} data_pointer;
} *gv_temp;

extern struct {
	char *name;
	int loc;
	int dim;
	union {
		GVAL *restrict * restrict p2;
		GVAL *restrict * restrict * restrict p3;
	} data_pointer;
} *gv_dvg;

/*
 * One update step: for every block this MPI rank owns (blocks are dealt out
 * contiguously, ceil(cBlkCnt / world_size) per rank), accumulate
 * temp += 0.1 * dvg over all heights and cells.  The factor 0.1 is
 * presumably the generator's fixed time-step weight — confirm upstream.
 */
void step(GRID * g)
{
	{
		size_t min_block;
		size_t max_block;

		/* First local block: only the rank holding global block 0 can
		 * start past 0 (the expressions mirror the generated form). */
		if (g->mpi_rank == (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size))
			min_block = 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
		else
			min_block = 0;

		/* One-past-last local block: 0 for ranks outside the owner
		 * range, the remainder (or a full chunk) on the last owner,
		 * and a full chunk for every rank in between. */
		if (g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ||
		    g->mpi_rank > (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) {
			max_block = 0;
		} else if (g->mpi_rank == (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) {
			if (g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size))
				max_block = g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
			else
				max_block = (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
		} else {
			max_block = (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
		}

#pragma omp parallel for
		for (size_t blk = min_block; blk < max_block; blk++)
			for (size_t lvl = 0; lvl < g->height; lvl++)
				for (size_t cell = 0; cell < g->blkSize; cell++)
					gv_temp->data_pointer.p3[blk][lvl][cell] +=
					    0.1 * gv_dvg->data_pointer.p3[blk][lvl][cell];
	}
}
GB_unaryop__lnot_int16_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_int16_fp64
// op(A') function:  GB_tran__lnot_int16_fp64

// C type:   int16_t
// A type:   double
// cast:     int16_t cij ; GB_CAST_SIGNED(cij,aij,16)
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting (declares z; the cast macro saturates/truncates to signed 16-bit)
#define GB_CASTING(z, x) \
    int16_t z ; GB_CAST_SIGNED(z,x,16) ;

// cij = op (cast (aij))  (introduces locals aij and x for the two stages)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT16 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)):  apply a unary operator
//------------------------------------------------------------------------------

// Element-wise over the anz entries, parallelized statically over nthreads.
GrB_Info GB_unop__lnot_int16_fp64
(
    int16_t *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')):  transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in GB_unaryop_transpose.c, driven by the macros above.
GrB_Info GB_tran__lnot_int16_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
bezier_classical_post_utility.h
// // Project Name: Kratos // Last Modified by: $Author: hbui $ // Date: $Date: 2013-10-12 $ // Revision: $Revision: 1.0 $ // // #if !defined(KRATOS_BEZIER_CLASSICAL_POST_UTILITY_H_INCLUDED ) #define KRATOS_BEZIER_CLASSICAL_POST_UTILITY_H_INCLUDED // System includes #include <string> #include <vector> #include <iostream> // External includes #include <omp.h> #include "boost/progress.hpp" #ifdef ISOGEOMETRIC_USE_MPI #include "mpi.h" #endif // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/node.h" #include "includes/element.h" #include "includes/properties.h" #include "includes/ublas_interface.h" #include "includes/deprecated_variables.h" #include "includes/legacy_structural_app_vars.h" #include "spaces/ublas_space.h" #include "linear_solvers/linear_solver.h" #include "utilities/openmp_utils.h" #include "utilities/auto_collapse_spatial_binning.h" #include "custom_utilities/iga_define.h" #include "custom_geometries/isogeometric_geometry.h" #include "custom_utilities/isogeometric_utility.h" #include "custom_utilities/isogeometric_post_utility.h" #include "isogeometric_application/isogeometric_application.h" //#define DEBUG_LEVEL1 //#define DEBUG_LEVEL2 //#define DEBUG_MULTISOLVE //#define DEBUG_GENERATE_MESH #define ENABLE_PROFILING namespace Kratos { ///@addtogroup IsogeometricApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ template<class T> void AddToModelPart(ModelPart& rModelPart, typename T::Pointer pE); template<> void AddToModelPart<Element>(ModelPart& rModelPart, typename Element::Pointer pE) { rModelPart.AddElement(pE); } template<> void AddToModelPart<Condition>(ModelPart& rModelPart, typename Condition::Pointer pC) { rModelPart.AddCondition(pC); } ///@} ///@name Kratos Classes ///@{ /// Short class definition. /** A simple utility to export directly the FEM mesh out from isogeometric Bezier mesh. 
Each Bezier element will generate its own set of FEM elements. Therefore a large amount of nodes and elements
may be generated. One shall carefully use this utility for large problem.
Previously, this class is named IsogeometricClassicalPostUtility.
*/
class BezierClassicalPostUtility : public IsogeometricPostUtility
{
public:
    ///@name Type Definitions
    ///@{

    // ublas containers used for nodal/integration-point value transfer.
    typedef boost::numeric::ublas::vector<double> ValuesContainerType;
    typedef boost::numeric::ublas::matrix<double> ValuesArrayContainerType;

    // Shorthands for the Kratos model part / geometry types used throughout.
    typedef typename ModelPart::NodesContainerType NodesArrayType;
    typedef typename ModelPart::ElementsContainerType ElementsArrayType;
    typedef typename ModelPart::ConditionsContainerType ConditionsArrayType;
    typedef typename Element::GeometryType GeometryType;
    typedef typename GeometryType::PointType NodeType;
    typedef IsogeometricGeometry<NodeType> IsogeometricGeometryType;
    typedef typename GeometryType::IntegrationPointsArrayType IntegrationPointsArrayType;
    typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;
    typedef typename NodeType::DofsContainerType DofsContainerType;

    // Serial linear algebra spaces for the transfer solves.
    typedef UblasSpace<double, CompressedMatrix, Vector> SerialSparseSpaceType;
    typedef UblasSpace<double, Matrix, Vector> SerialDenseSpaceType;
    typedef LinearSolver<SerialSparseSpaceType, SerialDenseSpaceType> LinearSolverType;

    typedef std::size_t IndexType;

    /// Pointer definition of BezierClassicalPostUtility
    KRATOS_CLASS_POINTER_DEFINITION(BezierClassicalPostUtility);

    ///@}
    ///@name Life Cycle
    ///@{

    /// Default constructor.  Keeps a handle on the (isogeometric) source model part.
    BezierClassicalPostUtility(ModelPart::Pointer pModelPart)
        : mpModelPart(pModelPart)
    {
    }

    /// Destructor.
    virtual ~BezierClassicalPostUtility()
    {
    }

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /// Generate the post model_part from reference model_part
    /// Deprecated
    /** Subdivides every active Bezier element into NUM_DIVISION_1/2(/3) linear
     *  FEM cells, creating fresh nodes at the mapped global coordinates and
     *  KinematicLinear elements of the requested postElementType.  Also fills
     *  the node->local-coordinates, node->element and old->new element maps
     *  used later for value transfer. */
    void GenerateModelPart(ModelPart::Pointer pModelPartPost, PostElementType postElementType)
    {
#ifdef ENABLE_PROFILING
        double start_compute = OpenMPUtils::GetCurrentTime();
#endif

#ifdef DEBUG_LEVEL1
        std::cout << typeid(*this).name() << "::GenerateModelPart" << std::endl;
#endif

        ElementsArrayType& pElements = mpModelPart->Elements();

#ifdef DEBUG_LEVEL1
        std::cout << "Retrieved pElements" << std::endl;
#endif

        std::string NodeKey = std::string("Node");

        //select the correct post element type
        std::string element_name;
        if(postElementType == _TRIANGLE_)
            element_name = std::string("KinematicLinear2D3N");
        else if(postElementType == _QUADRILATERAL_)
            element_name = std::string("KinematicLinear2D4N");
        else if(postElementType == _TETRAHEDRA_)
            element_name = std::string("KinematicLinear3D4N");
        else if(postElementType == _HEXAHEDRA_)
            element_name = std::string("KinematicLinear3D8N");
        else
            KRATOS_THROW_ERROR(std::logic_error, "This element type is not supported for isogeometric post-processing", __FUNCTION__);

        if(!KratosComponents<Element>::Has(element_name))
        {
            std::stringstream buffer;
            buffer << "Element " << element_name << " is not registered in Kratos.";
            buffer << " Please check the spelling of the element name and see if the application which containing it, is registered corectly.";
            KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), "");
        }

        Element const& rCloneElement = KratosComponents<Element>::Get(element_name);

        IndexType NodeCounter = 0;
        IndexType ElementCounter = 0;
        boost::progress_display show_progress( pElements.size() );
        for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
        {
            // inactive source elements produce no post-mesh
            if((*it)->GetValue( IS_INACTIVE ))
            {
//                std::cout << "Element " << (*it)->Id() << " is inactive" << std::endl;
                continue;
            }

            int Dim = (*it)->GetGeometry().WorkingSpaceDimension();
            IndexType NodeCounter_old = NodeCounter;

#ifdef DEBUG_LEVEL1
            KRATOS_WATCH(Dim)
#endif

            //get the properties
            Properties::Pointer pDummyProperties = (*it)->pGetProperties();

#ifdef DEBUG_LEVEL1
            KRATOS_WATCH(*pDummyProperties)
#endif

            // generate list of nodes
            if(Dim == 1)
            {
                // TODO
            }
            else if(Dim == 2)
            {
                IndexType NumDivision1 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_1) );
                IndexType NumDivision2 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_2) );
                IndexType i, j;
                CoordinatesArrayType p_ref;
                CoordinatesArrayType p;

#ifdef DEBUG_LEVEL1
                KRATOS_WATCH(NumDivision1)
                KRATOS_WATCH(NumDivision2)
                std::cout << "Generating Nodes..." << std::endl;
#endif

                // create and add nodes on a uniform (NumDivision1+1)x(NumDivision2+1)
                // grid in the element's reference (local) coordinates
                p_ref[2] = 0.0;
                for(i = 0; i <= NumDivision1; ++i)
                {
                    p_ref[0] = ((double) i) / NumDivision1;
                    for(j = 0; j <= NumDivision2; ++j)
                    {
                        p_ref[1] = ((double) j) / NumDivision2;

                        p = GlobalCoordinates((*it)->GetGeometry(), p, p_ref);

                        NodeType::Pointer pNewNode( new NodeType( 0, p ) );
                        pNewNode->SetId(++NodeCounter);

#ifdef DEBUG_GENERATE_MESH
//                        if(NodeCounter == 585 || NodeCounter == 588 || NodeCounter == 589)
                        if(NodeCounter)
                        {
                            std::cout << "Node " << NodeCounter << " p_ref: " << p_ref << ", p: " << p << std::endl;
                        }
#endif

                        // Giving model part's variables list to the node
                        pNewNode->SetSolutionStepVariablesList(&pModelPartPost->GetNodalSolutionStepVariablesList());

                        //set buffer size
                        pNewNode->SetBufferSize(pModelPartPost->GetBufferSize());

                        pModelPartPost->AddNode(pNewNode);

                        mNodeToLocalCoordinates(pNewNode->Id()) = p_ref;
                        mNodeToElement(pNewNode->Id()) = (*it)->Id();
                    }
                }

                //for correct mapping to element, the repetitive node is allowed.
//                pModelPartPost->Nodes().Unique();

#ifdef DEBUG_LEVEL1
                KRATOS_WATCH(pModelPartPost->Nodes().size())
                std::cout << "Generating Elements..." << std::endl;
#endif

                // (legacy hand-rolled per-cell element creation was commented
                //  out here; superseded by the CreateEntities call below)

                // create and add elements from the grid-cell connectivities
                std::vector<std::vector<IndexType> > connectivities;
                for(i = 0; i < NumDivision1; ++i)
                {
                    for(j = 0; j < NumDivision2; ++j)
                    {
                        IndexType Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1;
                        IndexType Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2;
                        IndexType Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1;
                        IndexType Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2;

                        if(postElementType == _TRIANGLE_)
                        {
                            // each quad cell is split into two triangles
                            connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4});
                            connectivities.push_back(std::vector<IndexType>{Node1, Node4, Node3});
                        }
                        else if(postElementType == _QUADRILATERAL_)
                        {
                            connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4, Node3});
                        }
                    }
                }

                ElementsArrayType pNewElements = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, Element, ElementsArrayType>(
                    connectivities, *pModelPartPost, rCloneElement, ElementCounter, pDummyProperties, NodeKey);

                for (typename ElementsArrayType::ptr_iterator it2 = pNewElements.ptr_begin(); it2 != pNewElements.ptr_end(); ++it2)
                {
                    pModelPartPost->AddElement(*it2);
                    mOldToNewElements[(*it)->Id()].insert((*it2)->Id());
                }
                pModelPartPost->Elements().Unique();

#ifdef DEBUG_LEVEL1
                KRATOS_WATCH(pModelPartPost->Elements().size())
#endif
            }
            else if(Dim == 3)
            {
                IndexType NumDivision1 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_1) );
                IndexType NumDivision2 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_2) );
                IndexType NumDivision3 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_3) );
                IndexType i, j, k;
                CoordinatesArrayType p_ref;
                CoordinatesArrayType p;

#ifdef DEBUG_LEVEL1
                KRATOS_WATCH((*it)->Id())
                KRATOS_WATCH(NumDivision1)
                KRATOS_WATCH(NumDivision2)
                KRATOS_WATCH(NumDivision3)
                std::cout << "Generating Nodes..." << std::endl;
#endif

                // create and add nodes on a uniform 3-D reference grid
                for(i = 0; i <= NumDivision1; ++i)
                {
                    p_ref[0] = ((double) i) / NumDivision1;
                    for(j = 0; j <= NumDivision2; ++j)
                    {
                        p_ref[1] = ((double) j) / NumDivision2;
                        for(k = 0; k <= NumDivision3; ++k)
                        {
                            p_ref[2] = ((double) k) / NumDivision3;

                            p = GlobalCoordinates((*it)->GetGeometry(), p, p_ref);

                            NodeType::Pointer pNewNode( new NodeType( 0, p ) );
                            pNewNode->SetId(++NodeCounter);

#ifdef DEBUG_GENERATE_MESH
                            if(NodeCounter)
                            {
                                std::cout << "Node " << NodeCounter << " p_ref: " << p_ref << ", p: " << p << std::endl;
                            }
#endif

                            // Giving model part's variables list to the node
                            pNewNode->SetSolutionStepVariablesList(&pModelPartPost->GetNodalSolutionStepVariablesList());

                            //set buffer size
                            pNewNode->SetBufferSize(pModelPartPost->GetBufferSize());

                            pModelPartPost->AddNode(pNewNode);

                            mNodeToLocalCoordinates(pNewNode->Id()) = p_ref;
                            mNodeToElement(pNewNode->Id()) = (*it)->Id();
                        }
                    }
                }

                //for correct mapping to element, the repetitive node is allowed.
//                pModelPartPost->Nodes().Unique();

#ifdef DEBUG_LEVEL1
                KRATOS_WATCH(pModelPartPost->Nodes().size())
                std::cout << "Generating Elements..." << std::endl;
#endif

                // (legacy hand-rolled hexahedra creation was commented out
                //  here; superseded by the CreateEntities call below)

                std::vector<std::vector<IndexType> > connectivities;
                for(i = 0; i < NumDivision1; ++i)
                {
                    for(j = 0; j < NumDivision2; ++j)
                    {
                        for(k = 0; k < NumDivision3; ++k)
                        {
                            IndexType Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
                            IndexType Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
                            IndexType Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
                            IndexType Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
                            IndexType Node5 = Node1 + 1;
                            IndexType Node6 = Node2 + 1;
                            IndexType Node7 = Node3 + 1;
                            IndexType Node8 = Node4 + 1;

                            if(postElementType == _TETRAHEDRA_)
                            {
                                // TODO: check if creating Tetrahedra is correct
                                // NOTE(review): only 3 node ids are pushed per cell here,
                                // but KinematicLinear3D4N needs 4 — confirm upstream.
                                connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4});
                                connectivities.push_back(std::vector<IndexType>{Node1, Node4, Node3});
                            }
                            else if(postElementType == _HEXAHEDRA_)
                            {
                                connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4, Node3, Node5, Node6, Node8, Node7});
                            }
                        }
                    }
                }

                ElementsArrayType pNewElements = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, Element, ElementsArrayType>(
                    connectivities, *pModelPartPost, rCloneElement, ElementCounter, pDummyProperties, NodeKey);

                for (typename ElementsArrayType::ptr_iterator it2 = pNewElements.ptr_begin(); it2 != pNewElements.ptr_end(); ++it2)
                {
                    pModelPartPost->AddElement(*it2);
                    mOldToNewElements[(*it)->Id()].insert((*it2)->Id());
                }
                pModelPartPost->Elements().Unique();

#ifdef DEBUG_LEVEL1
                KRATOS_WATCH(pModelPartPost->Elements().size())
#endif
            }

            ++show_progress;
        }

#ifdef ENABLE_PROFILING
        double end_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "GeneratePostModelPart completed: " << (end_compute - start_compute) << " s" << std::endl;
#else
        std::cout << "GeneratePostModelPart completed" << std::endl;
#endif
        std::cout << NodeCounter << " nodes and " << ElementCounter << " elements are created" << std::endl;
    }

    /// Generate the post model_part from reference model_part
    /// this is the
/// improved version of GenerateModelPart
/// which uses template function to generate post Elements for both Element and Condition
/// @param pModelPartPost          target model part that receives the generated visualization mesh
/// @param generate_for_condition  if true, post Conditions are generated from the source Conditions as well
void GenerateModelPart2(ModelPart::Pointer pModelPartPost, const bool& generate_for_condition)
{
#ifdef ENABLE_PROFILING
    double start_compute = OpenMPUtils::GetCurrentTime();
#endif

#ifdef DEBUG_LEVEL1
    std::cout << typeid(*this).name() << "::GenerateModelPart" << std::endl;
#endif

    // Source containers of the reference model part held by this utility
    ElementsArrayType& pElements = mpModelPart->Elements();
    ConditionsArrayType& pConditions = mpModelPart->Conditions();

    std::string NodeKey = std::string("Node");

    // NodeCounter/ElementCounter are threaded by reference through every
    // GenerateForOneEntity call so generated ids stay globally unique.
    IndexType NodeCounter = 0;
    IndexType ElementCounter = 0;
    boost::progress_display show_progress( pElements.size() );
    std::vector<std::size_t> dummy_ids; // index collection is disabled in this pass (get_indices == false)
    for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
    {
        // This is wrong, we will not kill the IS_INACTIVE elements
        // TODO: to be deleted
        // if((*it)->GetValue( IS_INACTIVE ))
        // {
        ////     std::cout << "Element " << (*it)->Id() << " is inactive" << std::endl;
        //     ++show_progress;
        //     continue;
        // }

        if((*it)->pGetGeometry() == 0)
            KRATOS_THROW_ERROR(std::logic_error, "Error: geometry is NULL at element", (*it)->Id())

        int Dim = (*it)->GetGeometry().WorkingSpaceDimension(); // global dimension of the geometry that it works on
        int ReducedDim = (*it)->GetGeometry().Dimension(); // reduced dimension of the geometry
        IndexType NodeCounter_old = NodeCounter; // id offset of this element's node sub-grid

#ifdef DEBUG_LEVEL1
        KRATOS_WATCH(Dim)
        KRATOS_WATCH(ReducedDim)
#endif

        //select the correct post element type
        std::string element_name;
        if((Dim == 2) && (ReducedDim == 2))
        {
            element_name = std::string("KinematicLinear2D4N");
        }
        else if((Dim == 3) && (ReducedDim == 2))
        {
            // surface geometry embedded in 3D: still visualized with 2D quads
            element_name = std::string("KinematicLinear2D4N");
        }
        else if((Dim == 3) && (ReducedDim == 3))
        {
            element_name = std::string("KinematicLinear3D8N");
        }
        else
        {
            std::stringstream ss;
            ss << "Invalid dimension of ";
            ss << typeid(*(*it)).name();
            ss << ", Dim = " << Dim;
            ss << ", ReducedDim = " << ReducedDim;
            KRATOS_THROW_ERROR(std::logic_error, ss.str(), __FUNCTION__);
        }

        if(!KratosComponents<Element>::Has(element_name))
        {
            std::stringstream buffer;
            buffer << "Element " << element_name << " is not registered in Kratos.";
            // NOTE(review): "corectly" is a typo in this user-facing message; fixing it changes a runtime string
            buffer << " Please check the spelling of the element name and see if the application which containing it, is registered corectly.";
            KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), "");
        }

        Element const& rCloneElement = KratosComponents<Element>::Get(element_name);

        // type parameter 1 => generate Elements; indices are not collected (last flag false)
        GenerateForOneEntity<Element, ElementsArrayType, 1>(*pModelPartPost, *(*it), rCloneElement, NodeCounter_old, NodeCounter, ElementCounter, NodeKey, false, dummy_ids, dummy_ids, false);

        ++show_progress;
    }
    KRATOS_WATCH(ElementCounter) // NOTE(review): unguarded debug output (not inside DEBUG_LEVEL1)

#ifdef DEBUG_LEVEL1
    std::cout << "Done generating for elements" << std::endl;
#endif

    IndexType ConditionCounter = 0;
    if (generate_for_condition)
    {
        boost::progress_display show_progress2( pConditions.size() );
        for (typename ConditionsArrayType::ptr_iterator it = pConditions.ptr_begin(); it != pConditions.ptr_end(); ++it)
        {
            // This is wrong, we will not kill the IS_INACTIVE conditions
            // TODO: to be deleted
            // if((*it)->GetValue( IS_INACTIVE ))
            // {
            ////     std::cout << "Condition " << (*it)->Id() << " is inactive" << std::endl;
            //     ++show_progress2;
            //     continue;
            // }

            if((*it)->pGetGeometry() == 0)
                KRATOS_THROW_ERROR(std::logic_error, "Error: geometry is NULL at condition", (*it)->Id())

            int Dim = (*it)->GetGeometry().WorkingSpaceDimension(); // global dimension of the geometry that it works on
            int ReducedDim = (*it)->GetGeometry().Dimension(); // reduced dimension of the geometry
            IndexType NodeCounter_old = NodeCounter;

#ifdef DEBUG_LEVEL1
            KRATOS_WATCH(typeid((*it)->GetGeometry()).name())
            KRATOS_WATCH(Dim)
            KRATOS_WATCH(ReducedDim)
#endif

            //select the correct post condition type
            std::string condition_name;
            if(Dim == 3 && ReducedDim == 1)
                condition_name = std::string("LineForce3D2N");
            else if(Dim == 3 && ReducedDim == 2)
                condition_name = std::string("FaceForce3D4N");
            else
            {
                // unsupported dimension: the condition is skipped (not an error, unlike the element loop)
                std::stringstream ss;
                ss << "Invalid dimension of ";
                ss << typeid(*(*it)).name();
                ss << ", Dim = " << Dim;
                ss << ", ReducedDim = " << ReducedDim;
                ss << ". Condition " << (*it)->Id() << " will be skipped.";
                // KRATOS_THROW_ERROR(std::logic_error, ss.str(), __FUNCTION__);
                continue;
            }

            if(!KratosComponents<Condition>::Has(condition_name))
            {
                std::stringstream buffer;
                buffer << "Condition " << condition_name << " is not registered in Kratos.";
                buffer << " Please check the spelling of the condition name and see if the application which containing it, is registered corectly.";
                KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), "");
            }

            Condition const& rCloneCondition = KratosComponents<Condition>::Get(condition_name);

            // type parameter 2 => generate Conditions
            GenerateForOneEntity<Condition, ConditionsArrayType, 2>(*pModelPartPost, *(*it), rCloneCondition, NodeCounter_old, NodeCounter, ConditionCounter, NodeKey, false, dummy_ids, dummy_ids, false);

            ++show_progress2;
        }
        KRATOS_WATCH(ConditionCounter) // NOTE(review): unguarded debug output
    }

#ifdef ENABLE_PROFILING
    double end_compute = OpenMPUtils::GetCurrentTime();
    std::cout << "GeneratePostModelPart2 completed: " << (end_compute - start_compute) << " s" << std::endl;
#else
    std::cout << "GeneratePostModelPart2 completed" << std::endl;
#endif
    std::cout << NodeCounter << " nodes and " << ElementCounter << " elements";
    if (generate_for_condition)
        std::cout << ", " << ConditionCounter << " conditions";
    std::cout << " are created" << std::endl;
}

// Generate the post model_part from reference model_part
// this is the improved version of GenerateModelPart
// which uses template function to generate post Elements for both Element and Condition
// this version used a collapsing utility to collapse nodes automatically
// (coincident nodes within tolerance tol on a dx/dy/dz binning grid are merged)
void GenerateModelPart2AutoCollapse(ModelPart::Pointer pModelPartPost,
        double dx, double dy, double dz, double tol)
{
#ifdef ENABLE_PROFILING
    double start_compute = OpenMPUtils::GetCurrentTime();
#endif

#ifdef DEBUG_LEVEL1
    std::cout << typeid(*this).name() <<
"::GenerateModelPart" << std::endl;
#endif

    // spatial binning utility that maps coincident points (within tol) to one node id
    AutoCollapseSpatialBinning collapse_util(0.0, 0.0, 0.0, dx, dy, dz, tol);

    ElementsArrayType& pElements = mpModelPart->Elements();
    ConditionsArrayType& pConditions = mpModelPart->Conditions();

    std::string NodeKey = std::string("Node");

    // shared counters keep generated ids unique across all entities
    IndexType NodeCounter = 0;
    IndexType ElementCounter = 0;
    boost::progress_display show_progress( pElements.size() );
    // maps the sequential (pre-collapse) node counter to the collapsed node id
    VectorMap<IndexType, IndexType> MapToCollapseNode;
    for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
    {
        // inactive elements are not post-processed in this (auto-collapse) variant
        if((*it)->GetValue( IS_INACTIVE ))
        {
            // std::cout << "Element " << (*it)->Id() << " is inactive" << std::endl;
            ++show_progress;
            continue;
        }

        int Dim = (*it)->GetGeometry().WorkingSpaceDimension(); // global dimension of the geometry that it works on
        int ReducedDim = (*it)->GetGeometry().Dimension(); // reduced dimension of the geometry
        IndexType NodeCounter_old = NodeCounter;

#ifdef DEBUG_LEVEL1
        KRATOS_WATCH(Dim)
        KRATOS_WATCH(ReducedDim)
#endif

        //select the correct post element type
        std::string element_name;
        if(Dim == 2 && ReducedDim == 2)
            element_name = std::string("KinematicLinear2D4N");
        else if(Dim == 3 && ReducedDim == 3)
            element_name = std::string("KinematicLinear3D8N");
        else
        {
            std::stringstream ss;
            ss << "Invalid dimension of ";
            ss << typeid(*(*it)).name();
            ss << ", Dim = " << Dim;
            ss << ", ReducedDim = " << ReducedDim;
            KRATOS_THROW_ERROR(std::logic_error, ss.str(), __FUNCTION__);
        }

        if(!KratosComponents<Element>::Has(element_name))
        {
            std::stringstream buffer;
            buffer << "Element " << element_name << " is not registered in Kratos.";
            buffer << " Please check the spelling of the element name and see if the application which containing it, is registered corectly.";
            KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), "");
        }

        Element const& rCloneElement = KratosComponents<Element>::Get(element_name);

        // type parameter 1 => generate Elements
        GenerateForOneEntityAutoCollapse<Element, ElementsArrayType, 1>(collapse_util, *pModelPartPost, *(*it), rCloneElement,
                MapToCollapseNode, NodeCounter_old, NodeCounter, ElementCounter, NodeKey);

        ++show_progress;
    }

#ifdef DEBUG_LEVEL1
    std::cout << "Done generating for elements" << std::endl;
#endif

    IndexType ConditionCounter = 0;
    boost::progress_display show_progress2( pConditions.size() );
    for (typename ConditionsArrayType::ptr_iterator it = pConditions.ptr_begin(); it != pConditions.ptr_end(); ++it)
    {
        if((*it)->GetValue( IS_INACTIVE ))
        {
            // std::cout << "Condition " << (*it)->Id() << " is inactive" << std::endl;
            ++show_progress2;
            continue;
        }

        int Dim = (*it)->GetGeometry().WorkingSpaceDimension(); // global dimension of the geometry that it works on
        int ReducedDim = (*it)->GetGeometry().Dimension(); // reduced dimension of the geometry
        IndexType NodeCounter_old = NodeCounter;

#ifdef DEBUG_LEVEL1
        KRATOS_WATCH(typeid((*it)->GetGeometry()).name())
        KRATOS_WATCH(Dim)
        KRATOS_WATCH(ReducedDim)
#endif

        //select the correct post condition type
        std::string condition_name;
        if(Dim == 3 && ReducedDim == 1)
            condition_name = std::string("LineForce3D2N");
        else if(Dim == 3 && ReducedDim == 2)
            condition_name = std::string("FaceForce3D4N");
        else
        {
            // NOTE(review): unlike GenerateModelPart2, an unsupported condition dimension throws here
            std::stringstream ss;
            ss << "Invalid dimension of ";
            ss << typeid(*(*it)).name();
            ss << ", Dim = " << Dim;
            ss << ", ReducedDim = " << ReducedDim;
            KRATOS_THROW_ERROR(std::logic_error, ss.str(), __FUNCTION__);
        }

        if(!KratosComponents<Condition>::Has(condition_name))
        {
            std::stringstream buffer;
            buffer << "Condition " << condition_name << " is not registered in Kratos.";
            buffer << " Please check the spelling of the condition name and see if the application which containing it, is registered corectly.";
            KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), "");
        }

        Condition const& rCloneCondition = KratosComponents<Condition>::Get(condition_name);

        // type parameter 2 => generate Conditions
        GenerateForOneEntityAutoCollapse<Condition, ConditionsArrayType, 2>(collapse_util, *pModelPartPost, *(*it), rCloneCondition, MapToCollapseNode, NodeCounter_old, NodeCounter, ConditionCounter, NodeKey);

        ++show_progress2;
    }

#ifdef ENABLE_PROFILING
    double end_compute = OpenMPUtils::GetCurrentTime();
    std::cout << "Generate PostModelPart completed: " << (end_compute - start_compute) << " s" << std::endl;
#else
    std::cout << "Generate PostModelPart completed" << std::endl;
#endif
    std::cout << NodeCounter << " nodes and " << ElementCounter << " elements"
              << ", " << ConditionCounter << " conditions are created" << std::endl;
}

/**
 * Utility function to generate elements/conditions for element/condition
 * if TEntityType==Element, type must be 1; if T==Condition, type is 2
 *
 * Subdivides the parameter space of rE into a regular grid (NUM_DIVISION_1/2/3),
 * creates one post node per grid point (ids continue from NodeCounter) and one
 * post entity (clone of rSample) per cell, registering them in rModelPart.
 * When transfer_nodal_var is true, double and array_1d<double,3> solution-step
 * variables are interpolated onto the new nodes via the shape functions.
 * When get_indices is true, the created node/entity ids are appended to
 * node_ids / element_ids.
 */
template<class TEntityType, class TEntityContainerType, std::size_t type>
void GenerateForOneEntity(ModelPart& rModelPart,
        TEntityType& rE,
        TEntityType const& rSample,
        IndexType NodeCounter_old,
        IndexType& NodeCounter,
        IndexType& EntityCounter,
        const std::string& NodeKey,
        const bool& transfer_nodal_var,
        std::vector<std::size_t>& node_ids,
        std::vector<std::size_t>& element_ids,
        const bool& get_indices)
{
    // int ReducedDim = rE.GetGeometry().WorkingSpaceDimension();
    int ReducedDim = rE.GetGeometry().Dimension();

    //get the properties
    Properties::Pointer pDummyProperties = rE.pGetProperties();

#ifdef DEBUG_LEVEL1
    std::cout << "Generating for " << rE.Info() << std::endl;
    KRATOS_WATCH(*pDummyProperties)
    KRATOS_WATCH(EntityCounter)
#endif

    // generate list of nodes
    if(ReducedDim == 1)
    {
        // TODO
    }
    else if(ReducedDim == 2)
    {
        IndexType NumDivision1 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_1) );
        IndexType NumDivision2 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_2) );
        IndexType i, j;
        CoordinatesArrayType p_ref;
        CoordinatesArrayType p;
        Vector shape_values;

#ifdef DEBUG_LEVEL1
        KRATOS_WATCH(NumDivision1)
        KRATOS_WATCH(NumDivision2)
        std::cout << "Generating Nodes..."
<< std::endl;
#endif

        // create and add nodes
        // local coordinates sweep a (NumDivision1+1) x (NumDivision2+1) grid in [0,1]^2
        p_ref[2] = 0.0;
        for(i = 0; i <= NumDivision1; ++i)
        {
            p_ref[0] = ((double) i) / NumDivision1;
            for(j = 0; j <= NumDivision2; ++j)
            {
                p_ref[1] = ((double) j) / NumDivision2;

                p = GlobalCoordinates(rE.GetGeometry(), p, p_ref);

                NodeType::Pointer pNewNode( new NodeType( 0, p ) );
                pNewNode->SetId(++NodeCounter);

                // Giving model part's variables list to the node
                pNewNode->SetSolutionStepVariablesList(&rModelPart.GetNodalSolutionStepVariablesList());

                //set buffer size
                pNewNode->SetBufferSize(rModelPart.GetBufferSize());

                rModelPart.AddNode(pNewNode);

                if(type == 1)
                {
                    // remember where this post node came from (element branch only)
                    mNodeToLocalCoordinates(pNewNode->Id()) = p_ref;
                    mNodeToElement(pNewNode->Id()) = rE.Id();
                }

                if (transfer_nodal_var)
                {
                    shape_values = rE.GetGeometry().ShapeFunctionsValues(shape_values, p_ref);

                    // interpolate nodal solution-step variables onto the new node
                    VariablesList& var_list = rModelPart.GetNodalSolutionStepVariablesList();
                    for (VariablesList::const_iterator it = var_list.begin(); it != var_list.end(); ++it)
                    {
                        if (typeid(*it) == typeid(Variable<double>))
                        {
                            const Variable<double>& my_variable = dynamic_cast<const Variable<double>&>(*it);
                            double value = 0.0;
                            for (std::size_t n = 0; n < rE.GetGeometry().size(); ++n)
                                value += shape_values[n] * rE.GetGeometry()[n].GetSolutionStepValue(my_variable);
                            pNewNode->GetSolutionStepValue(my_variable) = value;
                        }
                        else if (typeid(*it) == typeid(Variable<array_1d<double, 3> >))
                        {
                            const Variable<array_1d<double, 3> >& my_variable = dynamic_cast<const Variable<array_1d<double, 3> >&>(*it);
                            array_1d<double, 3> value;
                            noalias(value) = ZeroVector(3);
                            for (std::size_t n = 0; n < rE.GetGeometry().size(); ++n)
                                noalias(value) += shape_values[n] * rE.GetGeometry()[n].GetSolutionStepValue(my_variable);
                            pNewNode->GetSolutionStepValue(my_variable) = value;
                        }
                    }
                }

                if (get_indices)
                    node_ids.push_back(pNewNode->Id());
            }
        }

        //for correct mapping to element, the repetitive node is allowed.
        // rModelPart.Nodes().Unique();

#ifdef DEBUG_LEVEL1
        KRATOS_WATCH(rModelPart.Nodes().size())
        if(type == 1)
            std::cout << "Generating Elements..." << std::endl;
        else
            std::cout << "Generating Conditions..." << std::endl;
#endif

        // create and add element
        // (legacy node-by-node creation, kept for reference)
        // typename T::NodesArrayType temp_nodes;
        // for(i = 0; i < NumDivision1; ++i)
        // {
        //     for(j = 0; j < NumDivision2; ++j)
        //     {
        //         int Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1;
        //         int Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2;
        //         int Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1;
        //         int Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2;
        //         // TODO: check if jacobian checking is necessary
        //         temp_nodes.clear();
        //         temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node1, NodeKey).base()));
        //         temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node2, NodeKey).base()));
        //         temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node4, NodeKey).base()));
        //         temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node3, NodeKey).base()));
        //         int NewEntityId = ++EntityCounter;
        //         // int NewEntityId = rE.Id(); ++EntityCounter;
        //         typename TEntityType::Pointer NewEntity = rSample.Create(NewEntityId, temp_nodes, pDummyProperties);
        //         AddToModelPart<TEntityType>(rModelPart, NewEntity);
        //         if(type == 1)
        //             mOldToNewElements[rE.Id()].insert(NewEntityId);
        //         else if(type == 2)
        //             mOldToNewConditions[rE.Id()].insert(NewEntityId);
        //     }
        // }

        // one quadrilateral connectivity per grid cell; node ids follow the
        // row-major numbering established in the node loop above
        std::vector<std::vector<IndexType> > connectivities;
        for(i = 0; i < NumDivision1; ++i)
        {
            for(j = 0; j < NumDivision2; ++j)
            {
                IndexType Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1;
                IndexType Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2;
                IndexType Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1;
                IndexType Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2;
                connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4, Node3});
            }
        }

        TEntityContainerType pNewEntities = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, TEntityType, TEntityContainerType>(
            connectivities, rModelPart, rSample, EntityCounter, pDummyProperties, NodeKey);

        for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2)
        {
            AddToModelPart<TEntityType>(rModelPart, *it2);
            // record the old->new id mapping for later result transfer
            if(type == 1)
                mOldToNewElements[rE.Id()].insert((*it2)->Id());
            else if(type == 2)
                mOldToNewConditions[rE.Id()].insert((*it2)->Id());
        }

        if(type == 1)
            rModelPart.Elements().Unique();
        else if(type == 2)
            rModelPart.Conditions().Unique();

#ifdef DEBUG_LEVEL1
        if(type == 1)
            KRATOS_WATCH(rModelPart.Elements().size())
        else
            KRATOS_WATCH(rModelPart.Conditions().size())
#endif

        if (get_indices)
        {
            for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2)
            {
                element_ids.push_back((*it2)->Id());
            }
        }
    }
    else if(ReducedDim == 3)
    {
        IndexType NumDivision1 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_1) );
        IndexType NumDivision2 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_2) );
        IndexType NumDivision3 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_3) );
        IndexType i, j, k;
        CoordinatesArrayType p_ref;
        CoordinatesArrayType p;
        Vector shape_values;

#ifdef DEBUG_LEVEL1
        KRATOS_WATCH(rE.Id())
        KRATOS_WATCH(NumDivision1)
        KRATOS_WATCH(NumDivision2)
        KRATOS_WATCH(NumDivision3)
        std::cout << "Generating Nodes..." << std::endl;
#endif

        // create and add nodes
        // local coordinates sweep a (N1+1) x (N2+1) x (N3+1) grid in [0,1]^3
        for(i = 0; i <= NumDivision1; ++i)
        {
            p_ref[0] = ((double) i) / NumDivision1;
            for(j = 0; j <= NumDivision2; ++j)
            {
                p_ref[1] = ((double) j) / NumDivision2;
                for(k = 0; k <= NumDivision3; ++k)
                {
                    p_ref[2] = ((double) k) / NumDivision3;

                    p = GlobalCoordinates(rE.GetGeometry(), p, p_ref);

                    NodeType::Pointer pNewNode( new NodeType( 0, p ) );
                    pNewNode->SetId(++NodeCounter);

#ifdef DEBUG_GENERATE_MESH
                    if(NodeCounter)
                    {
                        std::cout << "Node " << NodeCounter << " p_ref: " << p_ref << ", p: " << p << std::endl;
                    }
#endif

                    // Giving model part's variables list to the node
                    pNewNode->SetSolutionStepVariablesList(&rModelPart.GetNodalSolutionStepVariablesList());

                    //set buffer size
                    pNewNode->SetBufferSize(rModelPart.GetBufferSize());

                    rModelPart.AddNode(pNewNode);

                    if(type == 1)
                    {
                        // remember where this post node came from (element branch only)
                        mNodeToLocalCoordinates(pNewNode->Id()) = p_ref;
                        mNodeToElement(pNewNode->Id()) = rE.Id();
                    }

                    if (transfer_nodal_var)
                    {
                        shape_values = rE.GetGeometry().ShapeFunctionsValues(shape_values, p_ref);

                        // interpolate nodal solution-step variables onto the new node
                        VariablesList& var_list = rModelPart.GetNodalSolutionStepVariablesList();
                        for (VariablesList::const_iterator it = var_list.begin(); it != var_list.end(); ++it)
                        {
                            if (typeid(*it) == typeid(Variable<double>))
                            {
                                const Variable<double>& my_variable = dynamic_cast<const Variable<double>&>(*it);
                                double value = 0.0;
                                for (std::size_t n = 0; n < rE.GetGeometry().size(); ++n)
                                    value += shape_values[n] * rE.GetGeometry()[n].GetSolutionStepValue(my_variable);
                                pNewNode->GetSolutionStepValue(my_variable) = value;
                            }
                            else if (typeid(*it) == typeid(Variable<array_1d<double, 3> >))
                            {
                                const Variable<array_1d<double, 3> >& my_variable = dynamic_cast<const Variable<array_1d<double, 3> >&>(*it);
                                array_1d<double, 3> value;
                                noalias(value) = ZeroVector(3);
                                for (std::size_t n = 0; n < rE.GetGeometry().size(); ++n)
                                    noalias(value) += shape_values[n] * rE.GetGeometry()[n].GetSolutionStepValue(my_variable);
                                pNewNode->GetSolutionStepValue(my_variable) = value;
                            }
                        }
                    }

                    if (get_indices)
                        node_ids.push_back(pNewNode->Id());
                }
            }
        }

        //for correct mapping to element, the repetitive node is allowed.
        // rModelPart.Nodes().Unique();

#ifdef DEBUG_LEVEL1
        KRATOS_WATCH(rModelPart.Nodes().size())
        if(type == 1)
            std::cout << "Generating Elements..." << std::endl;
        else
            std::cout << "Generating Conditions..." << std::endl;
#endif

        // create and add element
        // (legacy node-by-node creation, kept for reference)
        // typename T::NodesArrayType temp_nodes;
        // for(i = 0; i < NumDivision1; ++i)
        // {
        //     for(j = 0; j < NumDivision2; ++j)
        //     {
        //         for(k = 0; k < NumDivision3; ++k)
        //         {
        //             int Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
        //             int Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
        //             int Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
        //             int Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
        //             int Node5 = Node1 + 1;
        //             int Node6 = Node2 + 1;
        //             int Node7 = Node3 + 1;
        //             int Node8 = Node4 + 1;
        //             // TODO: check if jacobian checking is necessary
        //             temp_nodes.clear();
        //             temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node1, NodeKey).base()));
        //             temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node2, NodeKey).base()));
        //             temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node4, NodeKey).base()));
        //             temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node3, NodeKey).base()));
        //             temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node5, NodeKey).base()));
        //             temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node6, NodeKey).base()));
        //             temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node8, NodeKey).base()));
        //             temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node7, NodeKey).base()));
        //             int NewEntityId = ++EntityCounter;
        //             typename TEntityType::Pointer NewEntity = rSample.Create(NewEntityId, temp_nodes, pDummyProperties);
        //             AddToModelPart<TEntityType>(rModelPart, NewEntity);
        //             if(type == 1)
        //                 mOldToNewElements[rE.Id()].insert(NewEntityId);
        //             else if(type == 2)
        //                 mOldToNewConditions[rE.Id()].insert(NewEntityId);
        //         }
        //     }
        // }

        // one hexahedral connectivity per grid cell; node ids follow the
        // i-major, then j, then k numbering established in the node loop above
        std::vector<std::vector<IndexType> > connectivities;
        for(i = 0; i < NumDivision1; ++i)
        {
            for(j = 0; j < NumDivision2; ++j)
            {
                for(k = 0; k < NumDivision3; ++k)
                {
                    IndexType Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
                    IndexType Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
                    IndexType Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
                    IndexType Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
                    IndexType Node5 = Node1 + 1;
                    IndexType Node6 = Node2 + 1;
                    IndexType Node7 = Node3 + 1;
                    IndexType Node8 = Node4 + 1;
                    connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4, Node3, Node5, Node6, Node8, Node7});
                }
            }
        }

        TEntityContainerType pNewEntities = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, TEntityType, TEntityContainerType>(
            connectivities, rModelPart, rSample, EntityCounter, pDummyProperties, NodeKey);

        for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2)
        {
            AddToModelPart<TEntityType>(rModelPart, *it2);
            // record the old->new id mapping for later result transfer
            if(type == 1)
                mOldToNewElements[rE.Id()].insert((*it2)->Id());
            else if(type == 2)
                mOldToNewConditions[rE.Id()].insert((*it2)->Id());
        }

        if(type == 1)
            rModelPart.Elements().Unique();
        else if(type == 2)
            rModelPart.Conditions().Unique();

#ifdef DEBUG_LEVEL1
        if(type == 1)
            KRATOS_WATCH(rModelPart.Elements().size())
        else
            KRATOS_WATCH(rModelPart.Conditions().size())
#endif

        if (get_indices)
        {
            for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2)
            {
                element_ids.push_back((*it2)->Id());
            }
        }
    }
}

/**
 * Utility function to generate elements/conditions for element/condition.
* This uses a collapse utility to automatically merge the coincident nodes * if T==Element, type must be 1; otherwise type=2 */ template<class TEntityType, class TEntityContainerType, std::size_t type> void GenerateForOneEntityAutoCollapse(AutoCollapseSpatialBinning& collapse_util, ModelPart& rModelPart, TEntityType& rE, TEntityType const& rSample, VectorMap<IndexType, IndexType>& rMapToCollapseNode, IndexType NodeCounter_old, IndexType& NodeCounter, IndexType& EntityCounter, const std::string& NodeKey) { // int ReducedDim = rE.GetGeometry().WorkingSpaceDimension(); int ReducedDim = rE.GetGeometry().Dimension(); //get the properties Properties::Pointer pDummyProperties = rE.pGetProperties(); #ifdef DEBUG_LEVEL1 if(type == 1) std::cout << "Generating for element " << rE.Id() << std::endl; else std::cout << "Generating for condition " << rE.Id() << std::endl; KRATOS_WATCH(*pDummyProperties) #endif // generate list of nodes if(ReducedDim == 1) { // TODO } else if(ReducedDim == 2) { IndexType NumDivision1 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_1) ); IndexType NumDivision2 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_2) ); IndexType i, j; CoordinatesArrayType p_ref; CoordinatesArrayType p; #ifdef DEBUG_LEVEL1 KRATOS_WATCH(NumDivision1) KRATOS_WATCH(NumDivision2) std::cout << "Generating Nodes..." 
<< std::endl; #endif // create and add nodes p_ref[2] = 0.0; for(i = 0; i <= NumDivision1; ++i) { p_ref[0] = ((double) i) / NumDivision1; for(j = 0; j <= NumDivision2; ++j) { p_ref[1] = ((double) j) / NumDivision2; p = GlobalCoordinates(rE.GetGeometry(), p, p_ref); IndexType id = static_cast<IndexType>( collapse_util.AddNode(p[0], p[1], p[2]) ); ++NodeCounter; rMapToCollapseNode[NodeCounter] = id; if(rModelPart.Nodes().find(id) == rModelPart.Nodes().end()) { // this is a new node NodeType::Pointer pNewNode( new NodeType( 0, p ) ); pNewNode->SetId(id); // Giving model part's variables list to the node pNewNode->SetSolutionStepVariablesList(&rModelPart.GetNodalSolutionStepVariablesList()); //set buffer size pNewNode->SetBufferSize(rModelPart.GetBufferSize()); rModelPart.AddNode(pNewNode); } else { // this is an old node, not required to add to model_part // so do nothing } // in this way, the node will always point to the last local coodinates and element if(type == 1) { mNodeToLocalCoordinates(id) = p_ref; mNodeToElement(id) = rE.Id(); } } } //for correct mapping to element, the repetitive node is allowed. // rModelPart.Nodes().Unique(); #ifdef DEBUG_LEVEL1 KRATOS_WATCH(rModelPart.Nodes().size()) if(type == 1) std::cout << "Generating Elements..." << std::endl; else std::cout << "Generating Conditions..." 
<< std::endl; #endif // create and add element // typename T::NodesArrayType temp_nodes; // for(i = 0; i < NumDivision1; ++i) // { // for(j = 0; j < NumDivision2; ++j) // { // int Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1; // int Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2; // int Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1; // int Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2; // // TODO: check if jacobian checking is necessary // temp_nodes.clear(); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node1], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node2], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node4], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node3], NodeKey).base())); // typename T::Pointer NewEntity = rSample.Create(++EntityCounter, temp_nodes, pDummyProperties); // AddToModelPart<T>(rModelPart, NewEntity); // if(type == 1) // mOldToNewElements[rE.Id()].insert(EntityCounter); // else if(type == 2) // mOldToNewConditions[rE.Id()].insert(EntityCounter); // } // } std::vector<std::vector<IndexType> > connectivities; for(i = 0; i < NumDivision1; ++i) { for(j = 0; j < NumDivision2; ++j) { IndexType Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1; IndexType Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2; IndexType Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1; IndexType Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2; connectivities.push_back(std::vector<IndexType>{ rMapToCollapseNode[Node1], rMapToCollapseNode[Node2], rMapToCollapseNode[Node4], rMapToCollapseNode[Node3]}); } } TEntityContainerType pNewEntities = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, TEntityType, TEntityContainerType>( connectivities, rModelPart, rSample, EntityCounter, 
pDummyProperties, NodeKey); for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2) { AddToModelPart<TEntityType>(rModelPart, *it2); if(type == 1) mOldToNewElements[rE.Id()].insert((*it2)->Id()); else if(type == 2) mOldToNewConditions[rE.Id()].insert((*it2)->Id()); } if(type == 1) rModelPart.Elements().Unique(); else if(type == 2) rModelPart.Conditions().Unique(); #ifdef DEBUG_LEVEL1 if(type == 1) KRATOS_WATCH(rModelPart.Elements().size()) else KRATOS_WATCH(rModelPart.Conditions().size()) #endif } else if(ReducedDim == 3) { IndexType NumDivision1 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_1) ); IndexType NumDivision2 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_2) ); IndexType NumDivision3 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_3) ); IndexType i, j, k; CoordinatesArrayType p_ref; CoordinatesArrayType p; #ifdef DEBUG_LEVEL1 KRATOS_WATCH(rE.Id()) KRATOS_WATCH(NumDivision1) KRATOS_WATCH(NumDivision2) KRATOS_WATCH(NumDivision3) std::cout << "Generating Nodes..." 
<< std::endl; #endif // create and add nodes for(i = 0; i <= NumDivision1; ++i) { p_ref[0] = ((double) i) / NumDivision1; for(j = 0; j <= NumDivision2; ++j) { p_ref[1] = ((double) j) / NumDivision2; for(k = 0; k <= NumDivision3; ++k) { p_ref[2] = ((double) k) / NumDivision3; p = GlobalCoordinates(rE.GetGeometry(), p, p_ref); IndexType id = static_cast<IndexType>( collapse_util.AddNode(p[0], p[1], p[2]) ); ++NodeCounter; rMapToCollapseNode[NodeCounter] = id; if(rModelPart.Nodes().find(id) == rModelPart.Nodes().end()) { // this is a new node NodeType::Pointer pNewNode( new NodeType( 0, p ) ); pNewNode->SetId(id); #ifdef DEBUG_GENERATE_MESH if(NodeCounter) { std::cout << "Node " << NodeCounter << " p_ref: " << p_ref << ", p: " << p << std::endl; } #endif // Giving model part's variables list to the node pNewNode->SetSolutionStepVariablesList(&rModelPart.GetNodalSolutionStepVariablesList()); //set buffer size pNewNode->SetBufferSize(rModelPart.GetBufferSize()); rModelPart.AddNode(pNewNode); } else { // this is an old node, not required to add to model_part // so do nothing } // in this way, the node will always point to the last local coodinates and element if(type == 1) { mNodeToLocalCoordinates(id) = p_ref; mNodeToElement(id) = rE.Id(); } } } } //for correct mapping to element, the repetitive node is allowed. // rModelPart.Nodes().Unique(); #ifdef DEBUG_LEVEL1 KRATOS_WATCH(rModelPart.Nodes().size()) if(type == 1) std::cout << "Generating Elements..." << std::endl; else std::cout << "Generating Conditions..." 
<< std::endl; #endif // create and add element // typename T::NodesArrayType temp_nodes; // for(i = 0; i < NumDivision1; ++i) // { // for(j = 0; j < NumDivision2; ++j) // { // for(k = 0; k < NumDivision3; ++k) // { // int Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1; // int Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1; // int Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1; // int Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1; // int Node5 = Node1 + 1; // int Node6 = Node2 + 1; // int Node7 = Node3 + 1; // int Node8 = Node4 + 1; // // TODO: check if jacobian checking is necessary // temp_nodes.clear(); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node1], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node2], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node4], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node3], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node5], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node6], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node8], NodeKey).base())); // temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node7], NodeKey).base())); // typename T::Pointer NewEntity = rSample.Create(++EntityCounter, temp_nodes, pDummyProperties); // AddToModelPart<T>(rModelPart, NewEntity); // if(type == 1) // mOldToNewElements[rE.Id()].insert(EntityCounter); // else if(type == 2) // mOldToNewConditions[rE.Id()].insert(EntityCounter); // } // } // } std::vector<std::vector<IndexType> > connectivities; for(i = 0; i < NumDivision1; ++i) { for(j = 0; j < 
NumDivision2; ++j)
{
    for(k = 0; k < NumDivision3; ++k)
    {
        // corner ids of one hexahedral cell in the structured (i, j, k) node grid;
        // numbering matches the node-generation loops above
        IndexType Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
        IndexType Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
        IndexType Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
        IndexType Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
        IndexType Node5 = Node1 + 1;
        IndexType Node6 = Node2 + 1;
        IndexType Node7 = Node3 + 1;
        IndexType Node8 = Node4 + 1;
        // map through rMapToCollapseNode so coincident nodes are merged;
        // the order (1,2,4,3,5,6,8,7) converts grid order to hexahedron connectivity
        connectivities.push_back(std::vector<IndexType>{
            rMapToCollapseNode[Node1], rMapToCollapseNode[Node2], rMapToCollapseNode[Node4], rMapToCollapseNode[Node3],
            rMapToCollapseNode[Node5], rMapToCollapseNode[Node6], rMapToCollapseNode[Node8], rMapToCollapseNode[Node7]});
    }
}
}

// create all new elements/conditions at once from the collected connectivities
TEntityContainerType pNewEntities = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, TEntityType, TEntityContainerType>(
    connectivities, rModelPart, rSample, EntityCounter, pDummyProperties, NodeKey);

// register every new entity and remember which old entity it came from
for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2)
{
    AddToModelPart<TEntityType>(rModelPart, *it2);
    if(type == 1)       // type == 1: elements
        mOldToNewElements[rE.Id()].insert((*it2)->Id());
    else if(type == 2)  // type == 2: conditions
        mOldToNewConditions[rE.Id()].insert((*it2)->Id());
}

if(type == 1)
    rModelPart.Elements().Unique();
else if(type == 2)
    rModelPart.Conditions().Unique();

#ifdef DEBUG_LEVEL1
if(type == 1)
    KRATOS_WATCH(rModelPart.Elements().size())
else
    KRATOS_WATCH(rModelPart.Conditions().size())
#endif
}
}

// Synchronize the activation between model_parts.
// Copies the IS_INACTIVE flag of every reference element/condition onto all
// post entities generated from it (using the old-to-new id maps).
void SynchronizeActivation(ModelPart::Pointer pModelPartPost)
{
    ElementsArrayType& pElements = mpModelPart->Elements();
    for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
    {
        std::set<IndexType> NewElements = mOldToNewElements[(*it)->Id()];
        for(std::set<IndexType>::iterator it2 = NewElements.begin(); it2 != NewElements.end(); ++it2)
        {
            pModelPartPost->GetElement(*it2).GetValue(IS_INACTIVE) = (*it)->GetValue( IS_INACTIVE );
        }
    }
    ConditionsArrayType& pConditions = mpModelPart->Conditions();
    for (typename ConditionsArrayType::ptr_iterator it = pConditions.ptr_begin(); it != pConditions.ptr_end(); ++it)
    {
        std::set<IndexType> NewConditions = mOldToNewConditions[(*it)->Id()];
        for(std::set<IndexType>::iterator it2 = NewConditions.begin(); it2 != NewConditions.end(); ++it2)
        {
            pModelPartPost->GetCondition(*it2).GetValue(IS_INACTIVE) = (*it)->GetValue( IS_INACTIVE );
        }
    }
}

// transfer the elemental data
// Copies the (non-historical) value of rThisVariable stored on each reference
// element to every post element generated from it.
template<class TVariableType>
void TransferElementalData(const TVariableType& rThisVariable, ModelPart::Pointer pModelPartPost)
{
    ElementsArrayType& pElements = mpModelPart->Elements();
    for(typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
    {
        std::set<IndexType> NewElements = mOldToNewElements[(*it)->Id()];
        for(std::set<IndexType>::iterator it2 = NewElements.begin(); it2 != NewElements.end(); ++it2)
        {
            pModelPartPost->GetElement(*it2).GetValue(rThisVariable) = (*it)->GetValue(rThisVariable);
        }
    }
}

// transfer the conditional data
// Same as TransferElementalData, but for conditions.
template<class TVariableType>
void TransferConditionalData(const TVariableType& rThisVariable, ModelPart::Pointer pModelPartPost)
{
    ConditionsArrayType& pConditions = mpModelPart->Conditions();
    for(typename ConditionsArrayType::ptr_iterator it = pConditions.ptr_begin(); it != pConditions.ptr_end(); ++it)
    {
        std::set<IndexType> NewConditions = mOldToNewConditions[(*it)->Id()];
        for(std::set<IndexType>::iterator it2 = NewConditions.begin(); it2 != NewConditions.end(); ++it2)
        {
            pModelPartPost->GetCondition(*it2).GetValue(rThisVariable) = (*it)->GetValue(rThisVariable);
        }
    }
}

// Synchronize post model_part with the reference model_part
// Interpolates the nodal value of rThisVariable at each post node from the
// reference element that generated it (via mNodeToElement/mNodeToLocalCoordinates).
template<class TVariableType>
void TransferNodalResults(
    const TVariableType& rThisVariable,
    const ModelPart::Pointer pModelPartPost
)
{
#ifdef ENABLE_PROFILING
    double start_compute = OpenMPUtils::GetCurrentTime();
#endif

    NodesArrayType& pTargetNodes = pModelPartPost->Nodes();
    ElementsArrayType& pElements = mpModelPart->Elements();

    typename TVariableType::Type Results;
    CoordinatesArrayType LocalPos;
    IndexType ElementId;

// #pragma omp parallel for //TODO: check this. This is not parallelized.
    for(NodesArrayType::ptr_iterator it = pTargetNodes.ptr_begin(); it != pTargetNodes.ptr_end(); ++it)
    {
        IndexType key = (*it)->Id();
        // only nodes that were generated from a reference element can be interpolated
        if(mNodeToElement.find(key) != mNodeToElement.end())
        {
            ElementId = mNodeToElement[key];
            if( ! pElements(ElementId)->GetValue(IS_INACTIVE) ) // skip the inactive elements
            {
                noalias(LocalPos) = mNodeToLocalCoordinates[key];
                Results = CalculateOnPoint(rThisVariable, Results, pElements(ElementId), LocalPos);
                (*it)->GetSolutionStepValue(rThisVariable) = Results;
            }
        }
    }

#ifdef ENABLE_PROFILING
    double end_compute = OpenMPUtils::GetCurrentTime();
    std::cout << "Transfer nodal point results for " << rThisVariable.Name() << " completed: " << end_compute - start_compute << " s" << std::endl;
#endif
}

// Synchronize post model_part with the reference model_part
// Two-step transfer: (1) L2-project rThisVariable from the integration points
// of the reference model_part to its nodes, (2) interpolate the nodal values
// onto the post model_part.
template<class TVariableType>
void TransferIntegrationPointResults(
    const TVariableType& rThisVariable,
    const ModelPart::Pointer pModelPartPost,
    LinearSolverType::Pointer pSolver
)
{
#ifdef ENABLE_PROFILING
    double start_compute = OpenMPUtils::GetCurrentTime();
    std::cout << "########################################" << std::endl;
    std::cout << "Transfer integration point results for " << rThisVariable.Name() << " starts" << std::endl;
#endif

    // firstly transfer rThisVariable from integration points of reference model_part to its nodes
    TransferVariablesToNodes(pSolver, mpModelPart, rThisVariable);

    // secondly transfer new nodal variables results to the post model_part
    TransferNodalResults(rThisVariable, pModelPartPost);

#ifdef ENABLE_PROFILING
    double end_compute = OpenMPUtils::GetCurrentTime();
    std::cout << "Transfer integration point results for " << rThisVariable.Name() << " completed: " << end_compute - start_compute << "s" << std::endl;
    std::cout << "########################################" << std::endl;
#endif
}

// Transfer the variable to nodes for model_part
// Public wrapper around the private L2-projection routine (note the different
// parameter order: variable first here, solver first in the private overloads).
template<class TVariableType>
void TransferVariablesToNodes(
    const TVariableType& rThisVariable,
    ModelPart::Pointer pModelPart,
    LinearSolverType::Pointer pSolver
)
{
#ifdef ENABLE_PROFILING
    double start_compute = OpenMPUtils::GetCurrentTime();
    std::cout << "########################################" << std::endl;
    std::cout << "Transfer integration point results to nodes for " << rThisVariable.Name() << " starts" << std::endl;
#endif

    TransferVariablesToNodes(pSolver, pModelPart, rThisVariable);

#ifdef ENABLE_PROFILING
    double end_compute = OpenMPUtils::GetCurrentTime();
    std::cout << "Transfer integration point results to nodes for " << rThisVariable.Name() << " completed: " << end_compute - start_compute << "s" << std::endl;
    std::cout << "########################################" << std::endl;
#endif
}

/**
 * Utility function to renumber the nodes of the post model_part (for parallel merge)
 */
void GlobalNodalRenumbering(ModelPart::Pointer pModelPartPost)
{
#ifdef ISOGEOMETRIC_USE_MPI
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // gather the number of nodes on each process
    int NumberOfNodes[size]; // NOTE(review): variable-length array is a compiler extension in C++
    int MyNumberOfNodes = pModelPartPost->NumberOfNodes();
    MPI_Allgather(&MyNumberOfNodes, 1, MPI_INT, NumberOfNodes, 1, MPI_INT, MPI_COMM_WORLD);
    // std::cout << "NumberOfNodes:";
    // for(int i = 0; i < size; ++i)
    //     std::cout << " " << NumberOfNodes[i];
    // std::cout << std::endl;

    // compute the numbering offset
    int offset = 0;
    for(int i = 0; i < rank; ++i)
        offset += NumberOfNodes[i];

    // renumber the nodes of the current process
    for(ModelPart::NodeIterator it = pModelPartPost->NodesBegin(); it != pModelPartPost->NodesEnd(); ++it)
    {
        it->SetId(++offset);
        it->GetSolutionStepValue(PARTITION_INDEX) = rank;
    }

    if(rank ==
0)
        std::cout << "Global renumbering completed" << std::endl;
#endif
}

///@}
///@name Access
///@{

///@}
///@name Inquiry
///@{

///@}
///@name Input and output
///@{

/// Turn back information as a string.
virtual std::string Info() const
{
    std::stringstream buffer;
    buffer << "BezierClassicalPostUtility";
    return buffer.str();
}

/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const
{
    rOStream << "BezierClassicalPostUtility";
}

/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const
{}

///@}
///@name Friends
///@{

///@}

protected:
///@name Protected static Member Variables
///@{

///@}
///@name Protected member Variables
///@{

///@}
///@name Protected Operators
///@{

///@}
///@name Protected Operations
///@{

///@}
///@name Protected Access
///@{

///@}
///@name Protected Inquiry
///@{

///@}
///@name Protected LifeCycle
///@{

///@}

private:
///@name Static Member Variables
///@{

///@}
///@name Member Variables
///@{

ModelPart::Pointer mpModelPart; // pointer variable to a model_part
VectorMap<IndexType, CoordinatesArrayType> mNodeToLocalCoordinates; // vector map to store local coordinates of node on a NURBS entity
VectorMap<IndexType, IndexType> mNodeToElement; // vector map to store the id of the NURBS element that generated each post node
std::map<IndexType, std::set<IndexType> > mOldToNewElements; // vector map to store id map from old element to new elements
std::map<IndexType, std::set<IndexType> > mOldToNewConditions; // vector map to store id map from old condition to new conditions

///@}
///@name Private Operators
///@{

///@}
///@name Private Operations
///@{

/**
 * Calculate global coodinates w.r.t initial configuration
 */
CoordinatesArrayType& GlobalCoordinates(
    GeometryType& rGeometry,
    CoordinatesArrayType& rResult,
    CoordinatesArrayType const& LocalCoordinates
)
{
    noalias( rResult ) = ZeroVector( 3 );

    Vector ShapeFunctionsValues;
    rGeometry.ShapeFunctionsValues(ShapeFunctionsValues, LocalCoordinates);

    // x(xi) = sum_i N_i(xi) * X_i over the control points (initial positions)
    for ( IndexType i = 0 ; i < rGeometry.size() ; ++i )
    {
        noalias( rResult ) += ShapeFunctionsValues( i ) * rGeometry.GetPoint( i ).GetInitialPosition();
    }

    return rResult;
}

/**
 * Interpolation on element
 * Evaluates a scalar nodal variable at a local point of the element.
 */
double CalculateOnPoint(
    const Variable<double>& rVariable,
    double& rResult,
    Element::Pointer& pElement,
    const CoordinatesArrayType& rCoordinates
)
{
    Vector N;
    pElement->GetGeometry().ShapeFunctionsValues(N, rCoordinates);

    rResult = 0.0;
    for(unsigned int i = 0; i < pElement->GetGeometry().size(); ++i)
    {
        double NodalValues = pElement->GetGeometry()[i].GetSolutionStepValue(rVariable);
        rResult += N( i ) * NodalValues;
    }

    return rResult;
}

/**
 * Interpolation on element
 * Evaluates a Vector nodal variable at a local point of the element.
 */
Vector& CalculateOnPoint(
    const Variable<Vector>& rVariable,
    Vector& rResult,
    Element::Pointer& pElement,
    const CoordinatesArrayType& rCoordinates
)
{
    Vector N;
    pElement->GetGeometry().ShapeFunctionsValues(N, rCoordinates);

    for(unsigned int i = 0; i < pElement->GetGeometry().size(); ++i)
    {
        Vector& NodalValues = pElement->GetGeometry()[i].GetSolutionStepValue(rVariable);
        // the first contribution sizes rResult; later ones can use noalias
        if(i == 0)
        {
            rResult = N( i ) * NodalValues;
        }
        else
        {
            noalias(rResult) += N( i ) * NodalValues;
        }
    }

    return rResult;
}

/**
 * Interpolation on element
 * Evaluates an array_1d<double, 3> nodal variable at a local point of the element.
 */
array_1d<double, 3>& CalculateOnPoint(
    const Variable<array_1d<double, 3> >& rVariable,
    array_1d<double, 3>& rResult,
    Element::Pointer& pElement,
    const CoordinatesArrayType& rCoordinates
)
{
    Vector N;
    pElement->GetGeometry().ShapeFunctionsValues(N, rCoordinates);

    rResult[0] = 0.0;
    rResult[1] = 0.0;
    rResult[2] = 0.0;
    for(unsigned int i = 0; i < pElement->GetGeometry().size(); ++i)
    {
        array_1d<double, 3> NodalValues = pElement->GetGeometry()[i].GetSolutionStepValue(rVariable);
        rResult += N( i ) * NodalValues;
    }

    return rResult;
}

/**
 * Transfer variable at integration points to nodes
 *
 * @param pSolver the solver used for solving the local system matrix
 * @param pModelPart pointer to model_part that we wish to transfer the result from its integration points to its nodes
 * @param rThisVariable
the variable need to transfer the respected values
 */
void TransferVariablesToNodes(
    LinearSolverType::Pointer& pSolver,
    ModelPart::Pointer& pModelPart,
    const Variable<double>& rThisVariable
)
{
    ElementsArrayType& ElementsArray= pModelPart->Elements();

    //Initialize system of equations
    // NOTE(review): rows/columns are addressed by (node id - 1) below, so node
    // ids are assumed to be contiguous 1..NumberOfNodes — confirm for this model_part.
    int NumberOfNodes = pModelPart->NumberOfNodes();
    SerialSparseSpaceType::MatrixType M(NumberOfNodes, NumberOfNodes);
    noalias(M)= ZeroMatrix(NumberOfNodes, NumberOfNodes);
    SerialSparseSpaceType::VectorType g(NumberOfNodes);
    noalias(g)= ZeroVector(NumberOfNodes);
    SerialSparseSpaceType::VectorType b(NumberOfNodes);
    noalias(b)= ZeroVector(NumberOfNodes);

    // create the structure for M a priori
    ConstructL2MatrixStructure<Element>(M, ElementsArray);

    // Transfer of GaussianVariables to Nodal Variables via L_2-Minimization
    // see Jiao + Heath "Common-refinement-based data tranfer ..."
    // International Journal for numerical methods in engineering 61 (2004) 2402--2427
    // for general description of L_2-Minimization

    // set up the system of equations
    //create a partition of the element array
    int number_of_threads = omp_get_max_threads();
    std::vector<unsigned int> element_partition;
    OpenMPUtils::CreatePartition(number_of_threads, ElementsArray.size(), element_partition);
    KRATOS_WATCH( number_of_threads )
    std::cout << "element_partition:";
    for (std::size_t i = 0; i < element_partition.size(); ++i)
        std::cout << " " << element_partition[i];
    std::cout << std::endl;

    //create the array of lock
    // one lock per matrix row (i.e. per node) to serialize the scattered assembly
    std::vector< omp_lock_t > lock_array(NumberOfNodes);
    for(unsigned int i = 0; i < NumberOfNodes; ++i)
        omp_init_lock(&lock_array[i]);

#pragma omp parallel for
    for(int k = 0; k < number_of_threads; ++k)
    {
        Matrix InvJ(3, 3);
        double DetJ;
        unsigned int row, col;
        typename ElementsArrayType::ptr_iterator it_begin = ElementsArray.ptr_begin() + element_partition[k];
        typename ElementsArrayType::ptr_iterator it_end = ElementsArray.ptr_begin() + element_partition[k + 1];
        for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
        {
            if(!(*it)->GetValue(IS_INACTIVE))
            {
                const IntegrationPointsArrayType& integration_points = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
                GeometryType::JacobiansType J(integration_points.size());
                // J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
                // const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
                // isogeometric geometry is needed for the Bezier/NURBS shape function values
                IsogeometricGeometryType& rIsogeometricGeometry = dynamic_cast<IsogeometricGeometryType&>((*it)->GetGeometry());
                J = rIsogeometricGeometry.Jacobian0(J, (*it)->GetIntegrationMethod());
                GeometryType::ShapeFunctionsGradientsType DN_De;
                Matrix Ncontainer;
                rIsogeometricGeometry.CalculateShapeFunctionsIntegrationPointsValuesAndLocalGradients(
                    Ncontainer,
                    DN_De,
                    (*it)->GetIntegrationMethod()
                );

                // get the values at the integration_points
                std::vector<double> ValuesOnIntPoint(integration_points.size());
                (*it)->CalculateOnIntegrationPoints(rThisVariable, ValuesOnIntPoint, pModelPart->GetProcessInfo());

                for(unsigned int point = 0; point< integration_points.size(); ++point)
                {
                    MathUtils<double>::InvertMatrix(J[point], InvJ, DetJ);
                    double dV = DetJ * integration_points[point].Weight();
                    for(unsigned int prim = 0 ; prim < (*it)->GetGeometry().size(); ++prim)
                    {
                        row = (*it)->GetGeometry()[prim].Id()-1;
                        omp_set_lock(&lock_array[row]);
                        b(row) += (ValuesOnIntPoint[point]) * Ncontainer(point, prim) * dV;
                        for(unsigned int sec = 0 ; sec < (*it)->GetGeometry().size(); ++sec)
                        {
                            col = (*it)->GetGeometry()[sec].Id()-1;
                            M(row, col) += Ncontainer(point, prim) * Ncontainer(point, sec) * dV;
                        }
                        omp_unset_lock(&lock_array[row]);
                    }
                }
            }
            else
            {
                // for inactive elements the contribution to LHS is identity matrix and RHS is zero
                for(unsigned int prim = 0 ; prim < (*it)->GetGeometry().size(); ++prim)
                {
                    row = (*it)->GetGeometry()[prim].Id()-1;
                    omp_set_lock(&lock_array[row]);
                    // b(row) += 0.0;
                    for(unsigned int sec = 0 ; sec < (*it)->GetGeometry().size(); ++sec)
                    {
                        col = (*it)->GetGeometry()[sec].Id()-1;
                        if(col == row)
                            M(row, col) += 1.0;
                        // else
                        //     M(row, col) += 0.0;
                    }
                    omp_unset_lock(&lock_array[row]);
                }
            }
        }
    }

    for(unsigned int i = 0; i < NumberOfNodes; ++i)
        omp_destroy_lock(&lock_array[i]);

    // solver the system
    pSolver->Solve(M, g, b);

    // transfer the solution to the nodal variables
    for(ModelPart::NodeIterator it = pModelPart->NodesBegin(); it != pModelPart->NodesEnd(); ++it)
    {
        it->GetSolutionStepValue(rThisVariable) = g((it->Id()-1));
    }
}

/**
 * Transfer of rThisVariable defined on integration points to corresponding
 * nodal values. The transformation is done in a form that ensures a minimization
 * of L_2-norm error (/sum{rThisVariable- f(x)) whereas
 * f(x)= /sum{shape_func_i*rThisVariable_i}
 * @param model_part model_part on which the transfer should be done
 * @param rThisVariable Matrix-Variable which should be transferred
 * @see TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Matrix>& rThisVariable)
 * @see TransferVariablesToNodes(ModelPart& model_part, Variable<double>& rThisVariable)
 * @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
 * Journal for numer. meth. in eng. 61 (2004) 2402--2427
 * WARNING: this may cause segmentation faults as the respective variables
 * will be created on nodal level while they are originally intended to be
 * stored on integration points!
*
 * @param pSolver the solver used for solving the local system matrix
 * @param pModelPart pointer to model_part that we wish to transfer the result from its integration points to its nodes
 * @param rThisVariable the variable need to transfer the respected values
 */
void TransferVariablesToNodes(
    LinearSolverType::Pointer& pSolver,
    ModelPart::Pointer& pModelPart,
    const Variable<Vector>& rThisVariable
)
{
    ElementsArrayType& ElementsArray = pModelPart->Elements();

    // space dimension taken from the first element's geometry
    const unsigned int& Dim = (*(ElementsArray.ptr_begin()))->GetGeometry().WorkingSpaceDimension();
    unsigned int VariableSize;
    if(rThisVariable.Name() == std::string("STRESSES")
        || rThisVariable.Name() == std::string("PLASTIC_STRAIN_VECTOR")
        || rThisVariable.Name() == std::string("PRESTRESS")
        || rThisVariable.Name() == std::string("STRAIN")
        // TODO: extend for more variables
    )
    {
        // symmetric tensor in Voigt notation: 3 components in 2D, 6 in 3D
        VariableSize = Dim * (Dim + 1) / 2;
    }
    else
        KRATOS_THROW_ERROR(std::logic_error, rThisVariable.Name(), " is not a supported variable for TransferVariablesToNodes routine.")

#ifdef ENABLE_PROFILING
    //profiling variables
    double start_compute, end_compute;
    start_compute = OpenMPUtils::GetCurrentTime();
#endif

    //Initialize system of equations
    unsigned int NumberOfNodes = pModelPart->NumberOfNodes();
    SerialSparseSpaceType::MatrixType M(NumberOfNodes, NumberOfNodes);
    noalias(M)= ZeroMatrix(NumberOfNodes, NumberOfNodes);

    // create the structure for M a priori
    ConstructL2MatrixStructure<Element>(M, ElementsArray);

#ifdef ENABLE_PROFILING
    end_compute = OpenMPUtils::GetCurrentTime();
    std::cout << "ConstructMatrixStructure completed: " << end_compute - start_compute << " s" << std::endl;
    start_compute = end_compute;
#endif

    // one RHS/solution column per tensor component; solved as a multi-RHS system
    SerialDenseSpaceType::MatrixType g(NumberOfNodes, VariableSize);
    noalias(g)= ZeroMatrix(NumberOfNodes, VariableSize);
    SerialDenseSpaceType::MatrixType b(NumberOfNodes, VariableSize);
    noalias(b)= ZeroMatrix(NumberOfNodes, VariableSize);

    // one lock per matrix row (i.e. per node) to serialize the scattered assembly
    std::vector< omp_lock_t > lock_array(NumberOfNodes);
    for(unsigned int i = 0; i < NumberOfNodes; ++i)
        omp_init_lock(&lock_array[i]);

    //create a partition of the element array
    int number_of_threads = omp_get_max_threads();
    std::vector<unsigned int> element_partition;
    OpenMPUtils::CreatePartition(number_of_threads, ElementsArray.size(), element_partition);
    KRATOS_WATCH( number_of_threads )
    std::cout << "element_partition:";
    for (std::size_t i = 0; i < element_partition.size(); ++i)
        std::cout << " " << element_partition[i];
    std::cout << std::endl;

#pragma omp parallel for
    for(int k = 0; k < number_of_threads; ++k)
    {
        Matrix InvJ(Dim, Dim);
        double DetJ;
        unsigned int row, col;
        typename ElementsArrayType::ptr_iterator it_begin = ElementsArray.ptr_begin() + element_partition[k];
        typename ElementsArrayType::ptr_iterator it_end = ElementsArray.ptr_begin() + element_partition[k + 1];
        for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
        {
            if(!(*it)->GetValue(IS_INACTIVE))
            {
                const IntegrationPointsArrayType& integration_points = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
                GeometryType::JacobiansType J(integration_points.size());
                // J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
                // const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
                // isogeometric geometry is needed for the Bezier/NURBS shape function values
                IsogeometricGeometryType& rIsogeometricGeometry = dynamic_cast<IsogeometricGeometryType&>((*it)->GetGeometry());
                J = rIsogeometricGeometry.Jacobian0(J, (*it)->GetIntegrationMethod());
                GeometryType::ShapeFunctionsGradientsType DN_De;
                Matrix Ncontainer;
                rIsogeometricGeometry.CalculateShapeFunctionsIntegrationPointsValuesAndLocalGradients(
                    Ncontainer,
                    DN_De,
                    (*it)->GetIntegrationMethod()
                );

                // get the values at the integration_points
                std::vector<Vector> ValuesOnIntPoint(integration_points.size());
                (*it)->CalculateOnIntegrationPoints(rThisVariable, ValuesOnIntPoint, pModelPart->GetProcessInfo());

                for(unsigned int point = 0; point < integration_points.size(); ++point)
                {
                    MathUtils<double>::InvertMatrix(J[point], InvJ, DetJ);
                    double dV = DetJ * integration_points[point].Weight();
                    for(unsigned int prim = 0; prim < (*it)->GetGeometry().size(); ++prim)
                    {
                        row = (*it)->GetGeometry()[prim].Id() - 1;
                        omp_set_lock(&lock_array[row]);
                        for(unsigned int i = 0; i < VariableSize; ++i)
                            b(row, i) += ValuesOnIntPoint[point][i] * Ncontainer(point, prim) * dV;
                        for(unsigned int sec = 0; sec < (*it)->GetGeometry().size(); ++sec)
                        {
                            col = (*it)->GetGeometry()[sec].Id() - 1;
                            M(row, col) += Ncontainer(point, prim) * Ncontainer(point, sec) * dV;
                        }
                        omp_unset_lock(&lock_array[row]);
                    }
                }
            }
            else
            {
                // for inactive elements the contribution to LHS is identity matrix and RHS is zero
                for(unsigned int prim = 0; prim < (*it)->GetGeometry().size(); ++prim)
                {
                    row = (*it)->GetGeometry()[prim].Id() - 1;
                    omp_set_lock(&lock_array[row]);
                    // for(unsigned int i = 0; i < VariableSize; ++i)
                    //     b(row, i) += 0.0;
                    for(unsigned int sec = 0; sec < (*it)->GetGeometry().size(); ++sec)
                    {
                        col = (*it)->GetGeometry()[sec].Id() - 1;
                        if(col == row)
                            M(row, col) += 1.0;
                        // else
                        //     M(row, col) += 0.0;
                    }
                    omp_unset_lock(&lock_array[row]);
                }
            }
        }
    }

    for(unsigned int i = 0; i < NumberOfNodes; ++i)
        omp_destroy_lock(&lock_array[i]);

#ifdef ENABLE_PROFILING
    end_compute = OpenMPUtils::GetCurrentTime();
    std::cout << "Assemble the matrix completed: " << end_compute - start_compute << " s" << std::endl;
    start_compute = end_compute;
#endif

#ifdef DEBUG_MULTISOLVE
    KRATOS_WATCH(M)
    KRATOS_WATCH(b)
    KRATOS_WATCH(*pSolver)
#endif

    // solve the system
    // solver must support the multisove method
    pSolver->Solve(M, g, b);

#ifdef DEBUG_MULTISOLVE
    KRATOS_WATCH(g)
#endif

    // transfer the solution to the nodal variables
    for(ModelPart::NodeIterator it = pModelPart->NodesBegin(); it != pModelPart->NodesEnd(); ++it)
    {
        Vector tmp(VariableSize);
        for(unsigned int i = 0; i < VariableSize; ++i)
        {
            tmp(i) = g((it->Id()-1), i);
        }
        it->GetSolutionStepValue(rThisVariable) = tmp;
    }
}

///@}
///@name Private Access
///@{

///@}
///@name Private Inquiry
///@{

///@}
///@name Un accessible methods
///@{

/// Assignment operator.
BezierClassicalPostUtility& operator=(BezierClassicalPostUtility const& rOther)
{
    return *this;
}

/// Copy constructor.
BezierClassicalPostUtility(BezierClassicalPostUtility const& rOther)
{
}

///@}

}; // Class BezierClassicalPostUtility

///@}

///@name Type Definitions
///@{

///@}

///@name Input and output
///@{

/// input stream function
inline std::istream& operator >>(std::istream& rIStream, BezierClassicalPostUtility& rThis)
{
    return rIStream;
}

/// output stream function
inline std::ostream& operator <<(std::ostream& rOStream, const BezierClassicalPostUtility& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);
    return rOStream;
}
///@}

///@} addtogroup block

}// namespace Kratos.

#undef DEBUG_LEVEL1
#undef DEBUG_LEVEL2
#undef DEBUG_MULTISOLVE
#undef DEBUG_GENERATE_MESH
#undef ENABLE_PROFILING

#endif
bd_cilk.c
#include <stdlib.h> #include <stdio.h> #include <unistd.h> // access #include <math.h> #include <assert.h> #include "timer.h" #include "bd.h" //#include <omp.h> #include <cilk/cilk.h> #include <cilk/cilk_api.h> #define NTHREADS 24 #define M_PI 3.14159265358979323846 #define my_EPS 0.000000001 void get_indices(int index, int *i, int *j, int *k, int b){ int ib, ib2; ib = index%(b); ib2 = index%(b*b); *k = ib; *i = (index-ib2)/(b*b); *j = (ib2-*k)/b; return; } struct box { int head; }; // it is possible to use smaller boxes and more complex neighbor patterns #define NUM_BOX_NEIGHBORS 14 int box_neighbors[NUM_BOX_NEIGHBORS][3] = { {-1,-1,-1}, {-1,-1, 0}, {-1,-1,+1}, {-1, 0,-1}, {-1, 0, 0}, {-1, 0,+1}, {-1,+1,-1}, {-1,+1, 0}, {-1,+1,+1}, { 0,-1,-1}, { 0,-1, 0}, { 0,-1,+1}, { 0, 0,-1}, { 0, 0, 0} // will calculate within the box interactions }; int bd(int npos, double * restrict pos_orig, double * restrict buf, const int *types, double L, double * restrict pos, int* restrict next, double* restrict forces, double f_const) { __cilkrts_set_param("nworkers", "200"); // Initialisations required for INTERACTION FUNCTION******** NOTE: Can take input to bd itself!!! 
double krepul = 100, a=1, a_sq, phi=0.2, f; a_sq = a*a; int boxdim;// boxdim is number of cells in L double cutoff2; int numpairs_p; cutoff2 = 4;// cutoff < L/boxdim boxdim =(int)(L/cutoff2)*a;//(int)(L/cutoff2*0.8); printf("L = %lf cutoff2 = %lf boxdim = %d\n", L, cutoff2, boxdim); struct box b[boxdim][boxdim][boxdim]; struct box *bp; struct box *neigh_bp; // box indices int idx, idy, idz, index, box2, ib2; int neigh_idx, neigh_idy, neigh_idz; // allocate implied linked list int p1, p2, j, i; double d2, dx, dy, dz, s; box2 = boxdim*boxdim; //*****************************************END initialisations*********************************** if (boxdim < 4 || cutoff2 > (L/boxdim)*(L/boxdim)) { printf("interactions: bad input parameters\n"); // return 1; } double t0, t_init_cells = 0, t_assign_to_cells=0, t_update_pos=0, t_force=0; for (int step=0; step<INTERVAL_LEN; step++) { // Calculation of interaction per time step t0 = time_in_seconds(); // allocate memory for particles in each box // #pragma omp parallel for schedule(static) private(idx, idy, idz, ib2) shared(b, boxdim, box2) // for (index=0; index<boxdim*box2; index++){ // idz = index%(boxdim); // ib2 = index%(box2); // idx = (index-ib2)/(box2); // idy = (ib2-idz)/boxdim; // b[idx][idy][idz].head=-1; // } for (idx=0; idx<boxdim; idx++){ for (idy=0; idy<boxdim; idy++){ for (idz=0; idz<boxdim; idz++){ b[idx][idy][idz].head=-1; } } } t_init_cells += time_in_seconds()-t0; t0 = time_in_seconds(); // traverse all particles and assign to boxes // #pragma omp parallel for schedule(static) private(i, idx, idy, idz, bp) shared(b, next) num_threads(NTHREADS) // cilk_for (int i=0; i<npos; i++) for (int i=0; i<npos; i++) { int idx, idy, idz; struct box *bp; if (pos_orig[3*i] >= 0){pos[3*i]= fmod(pos_orig[3*i], L);}// OR SINCE PARTICLES moving slowly.. 
change to -L else {// pos_orig[i] is negative pos[3*i] = L-fmod(-1*pos_orig[3*i], L); } if (pos_orig[3*i+1] >= 0){pos[3*i+1]= fmod(pos_orig[3*i+1], L);}// OR SINCE PARTICLES moving slowly.. change to -L else {// pos_orig[i] is negative pos[3*i+1] = L-fmod(-1*pos_orig[3*i+1], L); } if (pos_orig[3*i+2] >= 0){pos[3*i+2]= fmod(pos_orig[3*i+2], L);}// OR SINCE PARTICLES moving slowly.. change to -L else {// pos_orig[i] is negative pos[3*i+2] = L-fmod(-1*pos_orig[3*i+2], L); } if (pos[3*i]<0){printf("pos_orig = %lf pos defect = %lf and i = %d and L =%lf\n", pos_orig[3*i], pos[3*i], i, L);} // initialize entry of implied linked list next[i] = -1; forces[3*i+0] = 0; forces[3*i+1] = 0; forces[3*i+2] = 0; // re-initialising interaction forces at each time step // which box does the particle belong to? // assumes particles have positions within [0,L]^3 idx = (int)(pos[3*i ]/L*boxdim); idy = (int)(pos[3*i+1]/L*boxdim); idz = (int)(pos[3*i+2]/L*boxdim); // add to beginning of implied linked list bp = &b[idx][idy][idz]; // next[i] = bp->head; // next = previous (my notation) // #pragma omp critical // { next[i] = bp->head; // next = previous (my notation) bp->head = i; // head = latest (my notation) // } } t_assign_to_cells += time_in_seconds()-t0; t0 = time_in_seconds(); // #pragma omp parallel for schedule(static) private(j, neigh_idx, neigh_idy, neigh_idz, neigh_bp, p1, p2, dx, dy, dz, d2, s, f, idx, idy, idz, ib2, bp) shared(b, box_neighbors, boxdim, L, pos, forces, krepul, a, a_sq, next, box2) num_threads(NTHREADS) cilk_for (int index=0; index<boxdim*box2; index++){ int j, neigh_idx, neigh_idy, neigh_idz, p1, p2, f, idx, idy, idz, ib2; double dx, dy, dz, s, d2; struct box *neigh_bp; struct box *bp; idz = index%(boxdim); ib2 = index%(box2); idx = (index-ib2)/(box2); idy = (ib2-idz)/boxdim; bp = &b[idx][idy][idz]; // interactions within and other boxes // #pragma omp parallel for schedule(static) private(j, neigh_idx, neigh_idy, neigh_idz, neigh_bp, p1, p2, dx, dy, dz, d2, s, 
f) shared(bp, b, box_neighbors, boxdim, L, pos, forces, krepul, a, a_sq, next, idx, idy, idz)// num_threads(NTHREADS) for (j=0; j<NUM_BOX_NEIGHBORS; j++) { neigh_idx = (idx + box_neighbors[j][0] + boxdim) % boxdim; neigh_idy = (idy + box_neighbors[j][1] + boxdim) % boxdim; neigh_idz = (idz + box_neighbors[j][2] + boxdim) % boxdim; neigh_bp = &b[neigh_idx][neigh_idy][neigh_idz]; // when using boxes, the minimum image computation is // known beforehand, thus we can compute position offsets // to compensate for wraparound when computing distances double xoffset = 0.; double yoffset = 0.; double zoffset = 0.; if (idx + box_neighbors[j][0] == -1) xoffset = -L; if (idy + box_neighbors[j][1] == -1) yoffset = -L; if (idz + box_neighbors[j][2] == -1) zoffset = -L; if (idx + box_neighbors[j][0] == boxdim) xoffset = L; if (idy + box_neighbors[j][1] == boxdim) yoffset = L; if (idz + box_neighbors[j][2] == boxdim) zoffset = L; // NOTE: modifying the function to update the forces p1 = neigh_bp->head; while (p1 != -1) { p2 = bp->head; while (p2 != -1) { // compute distance vector dx = pos[3*p1+0] - pos[3*p2+0] + xoffset; dy = pos[3*p1+1] - pos[3*p2+1] + yoffset; dz = pos[3*p1+2] - pos[3*p2+2] + zoffset; d2 = dx*dx+dy*dy+dz*dz+my_EPS; if ( d2<4.0*a_sq) { s = sqrt(d2); f = krepul*(2*a-s); // #pragma omp atomic forces[3*p1+0] += f*dx/s; // #pragma omp atomic forces[3*p1+1] += f*dy/s; // #pragma omp atomic forces[3*p1+2] += f*dz/s; // #pragma omp atomic forces[3*p2+0] -= f*dx/s; // #pragma omp atomic forces[3*p2+1] -= f*dy/s; // #pragma omp atomic forces[3*p2+2] -= f*dz/s; } p2 = next[p2]; } p1 = next[p1]; } } } t_force += time_in_seconds() - t0; t0 = time_in_seconds(); // generate random values from standard normal distribution // note: this MKL function is sequential but vectorized vdRngGaussian(VSL_RNG_METHOD_GAUSSIAN_BOXMULLER, stream, 3*npos, buf, 0., 1.); // update positions with Brownian displacements // #pragma omp parallel for schedule(static) shared(pos_orig) private(i) 
num_threads(NTHREADS) cilk_for (int i=0; i<3*npos; i++) { pos_orig[i] += forces[i]*DELTAT+f_const*buf[i]; } t_update_pos += time_in_seconds() - t0; } printf("--------------------------------------------------------\n"); printf("Time: %f for initiating the cell head \n", t_init_cells); printf("Time: %f for assigning particles to cells \n", t_assign_to_cells); printf("Time: %f for force calculations \n", t_force); printf("Time: %f for pos update \n", t_update_pos); printf("--------------------------------------------------------\n"); return 0; }
GB_binop__lxor_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lxor_fp32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__lxor_fp32) // A.*B function (eWiseMult): GB (_AemultB_03__lxor_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_fp32) // A*D function (colscale): GB (_AxD__lxor_fp32) // D*A function (rowscale): GB (_DxB__lxor_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__lxor_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__lxor_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_fp32) // C=scalar+B GB (_bind1st__lxor_fp32) // C=scalar+B' GB (_bind1st_tran__lxor_fp32) // C=A+scalar GB (_bind2nd__lxor_fp32) // C=A'+scalar GB (_bind2nd_tran__lxor_fp32) // C type: float // A type: float // B,b type: float // BinaryOp: cij = ((aij != 0) != (bij != 0)) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] 
#define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ float bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = ((x != 0) != (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LXOR || GxB_NO_FP32 || GxB_NO_LXOR_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// (still inside "#if 0": the dense-accum ewise3 variant does not exist for
// LXOR, since the op is not MIN/MAX/PLUS/MINUS/RMINUS/TIMES/DIV/RDIV)
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__lxor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__lxor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lxor_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lxor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lxor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lxor_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__lxor_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lxor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__lxor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lxor_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lxor_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // B->b if B is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float   x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        float bij = Bx [p] ;
        Cx [p] = ((x != 0) != (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__lxor_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float   y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        float aij = Ax [p] ;
        Cx [p] = ((aij != 0) != (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = Ax [pA] ;                       \
    Cx [pC] = ((x != 0) != (aij != 0)) ;        \
}

GrB_Info GB (_bind1st_tran__lxor_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = Ax [pA] ;                       \
    Cx [pC] = ((aij != 0) != (y != 0)) ;        \
}

GrB_Info GB (_bind2nd_tran__lxor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pr32362-3.c
/* PR middle-end/32362 */ /* { dg-do run } */ /* { dg-options "-O2" } */ #include <omp.h> #include <stdlib.h> int a = 2; int main () { int n[4] = { -1, -1, -1, -1 }; int b = 4; omp_set_num_threads (4); omp_set_dynamic (0); omp_set_nested (1); #pragma omp parallel private(b) { b = omp_get_thread_num (); #pragma omp parallel firstprivate(a) { a = (omp_get_thread_num () + a) + 1; if (b == omp_get_thread_num ()) n[omp_get_thread_num ()] = a + (b << 4); } } if (n[0] != 3) abort (); if (n[3] != -1 && (n[1] != 0x14 || n[2] != 0x25 || n[3] != 0x36)) abort (); return 0; }
deconvolution_pack8to1.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Deconvolution, 8-channel-packed input to 1-channel-packed output, AVX.
// Implemented as a *gather*: for every output pixel (i, j) it walks the
// kernel taps and accumulates the input pixels that would have scattered
// into this location, so no atomic/overlapping writes are needed.
// Each inner product handles 8 packed input channels per tap via one
// __m256 fused multiply-add, then horizontally reduces to a scalar.
// Assumes top_blob is pre-sized (outw/outh/outch read from it) and
// weight_data_packed holds maxk*8 floats per input channel per output channel.
static void deconvolution_pack8to1_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packed, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // effective kernel span after dilation
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                float sum = 0.f;

                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }

                __m256 _sum = _mm256_setzero_ps();

                const float* kptr = weight_data_packed.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        // candidate (stride-scaled) source row for this tap;
                        // only rows that land exactly on a stride multiple
                        // inside the input contribute
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        for (int x = 0; x < kernel_w; x++)
                        {
                            // same stride/bounds test for the source column
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            // 8 packed input channels at (sy, sx)
                            const float* sptr = m.row(sy) + sx * 8;

                            int k = y * kernel_w + x;

                            __m256 _val = _mm256_load_ps(sptr);
                            __m256 _w = _mm256_load_ps(kptr + k * 8);
                            _sum = _mm256_comp_fmadd_ps(_val, _w, _sum);
                        }
                    }

                    // advance to this output channel's weights for the next
                    // input channel
                    kptr += maxk * 8;
                }

                // horizontal add of the 8 lanes, then activation
                sum += _mm256_reduce_add_ps(_sum);

                sum = activation_ss(sum, activation_type, activation_params);

                outptr[0] = sum;
                outptr++;
            }
        }
    }
}
GB_binop__eq_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_int16) // A.*B function (eWiseMult): GB (_AemultB_08__eq_int16) // A.*B function (eWiseMult): GB (_AemultB_02__eq_int16) // A.*B function (eWiseMult): GB (_AemultB_04__eq_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_int16) // A*D function (colscale): GB (_AxD__eq_int16) // D*A function (rowscale): GB (_DxB__eq_int16) // C+=B function (dense accum): GB (_Cdense_accumB__eq_int16) // C+=b function (dense accum): GB (_Cdense_accumb__eq_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_int16) // C=scalar+B GB (_bind1st__eq_int16) // C=scalar+B' GB (_bind1st_tran__eq_int16) // C=A+scalar GB (_bind2nd__eq_int16) // C=A'+scalar GB (_bind2nd_tran__eq_int16) // C type: bool // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 
// aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_INT16 || GxB_NO_EQ_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__eq_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__eq_int16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, 
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__eq_int16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__eq_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const 
GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__eq_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__eq_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__eq_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__eq_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__eq_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__eq_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
zero_mat.c
/* Zero out a block matrix. */ #include <stdlib.h> #include <stdio.h> #include "declarations.h" void zero_mat(A) struct blockmatrix A; { int i,j; int blk; /* * Loop through the blocks, zeroing one at a time. */ for (blk=1; blk<=A.nblocks; blk++) { /* * Zero out block i. */ switch (A.blocks[blk].blockcategory) { case DIAG: for (i=1; i<=A.blocks[blk].blocksize; i++) { A.blocks[blk].data.vec[i]=0.0; }; break; case MATRIX: #pragma omp parallel for schedule(dynamic,64) default(none) shared(A,blk) private(j,i) for (j=1; j<=A.blocks[blk].blocksize; j++) for (i=1; i<=A.blocks[blk].blocksize; i++) A.blocks[blk].data.mat[ijtok(i,j,A.blocks[blk].blocksize)]=0.0; break; default: printf("Illegal block type \n"); exit(12); }; }; }
stream.c
/*-----------------------------------------------------------------------*/ /* Program: STREAM */ /* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */ /* Original code developed by John D. McCalpin */ /* Programmers: John D. McCalpin */ /* Joe R. Zagar */ /* */ /* This program measures memory transfer rates in MB/s for simple */ /* computational kernels coded in C. */ /*-----------------------------------------------------------------------*/ /* Copyright 1991-2013: John D. McCalpin */ /*-----------------------------------------------------------------------*/ /* License: */ /* 1. You are free to use this program and/or to redistribute */ /* this program. */ /* 2. You are free to modify this program for your own use, */ /* including commercial use, subject to the publication */ /* restrictions in item 3. */ /* 3. You are free to publish results obtained from running this */ /* program, or from works that you derive from this program, */ /* with the following limitations: */ /* 3a. In order to be referred to as "STREAM benchmark results", */ /* published results must be in conformance to the STREAM */ /* Run Rules, (briefly reviewed below) published at */ /* http://www.cs.virginia.edu/stream/ref.html */ /* and incorporated herein by reference. */ /* As the copyright holder, John McCalpin retains the */ /* right to determine conformity with the Run Rules. */ /* 3b. Results based on modified source code or on runs not in */ /* accordance with the STREAM Run Rules must be clearly */ /* labelled whenever they are published. Examples of */ /* proper labelling include: */ /* "tuned STREAM benchmark results" */ /* "based on a variant of the STREAM benchmark code" */ /* Other comparable, clear, and reasonable labelling is */ /* acceptable. */ /* 3c. Submission of results to the STREAM benchmark web site */ /* is encouraged, but not required. */ /* 4. 
Use of this program or creation of derived works based on this */ /* program constitutes acceptance of these licensing restrictions. */ /* 5. Absolutely no warranty is expressed or implied. */ /*-----------------------------------------------------------------------*/ # include <stdio.h> # include <unistd.h> # include <math.h> # include <float.h> # include <limits.h> # include <sys/time.h> /*----------------------------------------------------------------------- * INSTRUCTIONS: * * 1) STREAM requires different amounts of memory to run on different * systems, depending on both the system cache size(s) and the * granularity of the system timer. * You should adjust the value of 'STREAM_ARRAY_SIZE' (below) * to meet *both* of the following criteria: * (a) Each array must be at least 4 times the size of the * available cache memory. I don't worry about the difference * between 10^6 and 2^20, so in practice the minimum array size * is about 3.8 times the cache size. * Example 1: One Xeon E3 with 8 MB L3 cache * STREAM_ARRAY_SIZE should be >= 4 million, giving * an array size of 30.5 MB and a total memory requirement * of 91.5 MB. * Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP) * STREAM_ARRAY_SIZE should be >= 20 million, giving * an array size of 153 MB and a total memory requirement * of 458 MB. * (b) The size should be large enough so that the 'timing calibration' * output by the program is at least 20 clock-ticks. * Example: most versions of Windows have a 10 millisecond timer * granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds. * If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec. * This means the each array must be at least 1 GB, or 128M elements. * * Version 5.10 increases the default array size from 2 million * elements to 10 million elements in response to the increasing * size of L3 caches. The new default size is large enough for caches * up to 20 MB. 
* Version 5.10 changes the loop index variables from "register int" * to "ssize_t", which allows array indices >2^32 (4 billion) * on properly configured 64-bit systems. Additional compiler options * (such as "-mcmodel=medium") may be required for large memory runs. * * Array size can be set at compile time without modifying the source * code for the (many) compilers that support preprocessor definitions * on the compile line. E.g., * gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M * will override the default size of 10M with a new size of 100M elements * per array. */ #ifndef STREAM_ARRAY_SIZE # define STREAM_ARRAY_SIZE 10000000 #endif /* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result * for any iteration after the first, therefore the minimum value * for NTIMES is 2. * There are no rules on maximum allowable values for NTIMES, but * values larger than the default are unlikely to noticeably * increase the reported performance. * NTIMES can also be set on the compile line without changing the source * code using, for example, "-DNTIMES=7". */ #ifdef NTIMES #if NTIMES<=1 # define NTIMES 10 #endif #endif #ifndef NTIMES # define NTIMES 10 #endif /* Users are allowed to modify the "OFFSET" variable, which *may* change the * relative alignment of the arrays (though compilers may change the * effective offset by making the arrays non-contiguous on some systems). * Use of non-zero values for OFFSET can be especially helpful if the * STREAM_ARRAY_SIZE is set to a value close to a large power of 2. * OFFSET can also be set on the compile line without changing the source * code using, for example, "-DOFFSET=56". */ #ifndef OFFSET # define OFFSET 0 #endif /* * 3) Compile the code with optimization. Many compilers generate * unreasonably bad code before the optimizer tightens things up. * If the results are unreasonably good, on the other hand, the * optimizer might be too smart for me! 
* * For a simple single-core version, try compiling with: * cc -O stream.c -o stream * This is known to work on many, many systems.... * * To use multiple cores, you need to tell the compiler to obey the OpenMP * directives in the code. This varies by compiler, but a common example is * gcc -O -fopenmp stream.c -o stream_omp * The environment variable OMP_NUM_THREADS allows runtime control of the * number of threads/cores used when the resulting "stream_omp" program * is executed. * * To run with single-precision variables and arithmetic, simply add * -DSTREAM_TYPE=float * to the compile line. * Note that this changes the minimum array sizes required --- see (1) above. * * The preprocessor directive "TUNED" does not do much -- it simply causes the * code to call separate functions to execute each kernel. Trivial versions * of these functions are provided, but they are *not* tuned -- they just * provide predefined interfaces to be replaced with tuned code. * * * 4) Optional: Mail the results to mccalpin@cs.virginia.edu * Be sure to include info that will help me understand: * a) the computer hardware configuration (e.g., processor model, memory type) * b) the compiler name/version and compilation flags * c) any run-time information (such as OMP_NUM_THREADS) * d) all of the output from the test case. * * Thanks! 
* *-----------------------------------------------------------------------*/ # define HLINE "-------------------------------------------------------------\n" # ifndef MIN # define MIN(x,y) ((x)<(y)?(x):(y)) # endif # ifndef MAX # define MAX(x,y) ((x)>(y)?(x):(y)) # endif #ifndef STREAM_TYPE #define STREAM_TYPE double #endif static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET], b[STREAM_ARRAY_SIZE+OFFSET], c[STREAM_ARRAY_SIZE+OFFSET]; static double avgtime[4] = {0}, maxtime[4] = {0}, mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX}; static char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "}; static double bytes[4] = { 2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE }; extern double mysecond(); extern void checkSTREAMresults(); #ifdef TUNED extern void tuned_STREAM_Copy(); extern void tuned_STREAM_Scale(STREAM_TYPE scalar); extern void tuned_STREAM_Add(); extern void tuned_STREAM_Triad(STREAM_TYPE scalar); #endif #ifdef _OPENMP extern int omp_get_num_threads(); #endif int main() { int quantum, checktick(); int BytesPerWord; int k; ssize_t j; STREAM_TYPE scalar; double t, times[4][NTIMES]; /* --- SETUP --- determine precision and check timing --- */ printf(HLINE); printf("STREAM version $Revision: 5.10 $\n"); printf(HLINE); BytesPerWord = sizeof(STREAM_TYPE); printf("This system uses %d bytes per array element.\n", BytesPerWord); printf(HLINE); #ifdef N printf("***** WARNING: ******\n"); printf(" It appears that you set the preprocessor variable N when compiling this code.\n"); printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n"); printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE); printf("***** WARNING: ******\n"); #endif printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, 
OFFSET); printf("Memory per array = %.1f MiB (= %.1f GiB).\n", BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0), BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0)); printf("Total memory required = %.1f MiB (= %.1f GiB).\n", (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.), (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.)); printf("Each kernel will be executed %d times.\n", NTIMES); printf(" The *best* time for each kernel (excluding the first iteration)\n"); printf(" will be used to compute the reported bandwidth.\n"); #ifdef _OPENMP printf(HLINE); #pragma omp parallel { #pragma omp master { k = omp_get_num_threads(); printf ("Number of Threads requested = %i\n",k); } } #endif #ifdef _OPENMP k = 0; #pragma omp parallel #pragma omp atomic k++; printf ("Number of Threads counted = %i\n",k); #endif /* Get initial value for system clock. */ #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) { a[j] = 1.0; b[j] = 2.0; c[j] = 0.0; } printf(HLINE); if ( (quantum = checktick()) >= 1) printf("Your clock granularity/precision appears to be " "%d microseconds.\n", quantum); else { printf("Your clock granularity appears to be " "less than one microsecond.\n"); quantum = 1; } t = mysecond(); #pragma omp parallel for for (j = 0; j < STREAM_ARRAY_SIZE; j++) a[j] = 2.0E0 * a[j]; t = 1.0E6 * (mysecond() - t); printf("Each test below will take on the order" " of %d microseconds.\n", (int) t ); printf(" (= %d clock ticks)\n", (int) (t/quantum) ); printf("Increase the size of the arrays if this shows that\n"); printf("you are not getting at least 20 clock ticks per test.\n"); printf(HLINE); printf("WARNING -- The above is only a rough guideline.\n"); printf("For best results, please be sure you know the\n"); printf("precision of your system timer.\n"); printf(HLINE); /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ scalar = 3.0; for (k=0; k<NTIMES; k++) { times[0][k] = mysecond(); #ifdef TUNED 
tuned_STREAM_Copy(); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]; #endif times[0][k] = mysecond() - times[0][k]; times[1][k] = mysecond(); #ifdef TUNED tuned_STREAM_Scale(scalar); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) b[j] = scalar*c[j]; #endif times[1][k] = mysecond() - times[1][k]; times[2][k] = mysecond(); #ifdef TUNED tuned_STREAM_Add(); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]+b[j]; #endif times[2][k] = mysecond() - times[2][k]; times[3][k] = mysecond(); #ifdef TUNED tuned_STREAM_Triad(scalar); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) a[j] = b[j]+scalar*c[j]; #endif times[3][k] = mysecond() - times[3][k]; } /* --- SUMMARY --- */ for (k=1; k<NTIMES; k++) /* note -- skip first iteration */ { for (j=0; j<4; j++) { avgtime[j] = avgtime[j] + times[j][k]; mintime[j] = MIN(mintime[j], times[j][k]); maxtime[j] = MAX(maxtime[j], times[j][k]); } } printf("Function Best Rate MB/s Avg time Min time Max time\n"); for (j=0; j<4; j++) { avgtime[j] = avgtime[j]/(double)(NTIMES-1); printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j], 1.0E-06 * bytes[j]/mintime[j], avgtime[j], mintime[j], maxtime[j]); } printf(HLINE); /* --- Check Results --- */ checkSTREAMresults(); printf(HLINE); return 0; } # define M 20 int checktick() { int i, minDelta, Delta; double t1, t2, timesfound[M]; /* Collect a sequence of M unique time values from the system. */ for (i = 0; i < M; i++) { t1 = mysecond(); while( ((t2=mysecond()) - t1) < 1.0E-6 ) ; timesfound[i] = t1 = t2; } /* * Determine the minimum difference between these M values. * This result will be our estimate (in microseconds) for the * clock granularity. */ minDelta = 1000000; for (i = 1; i < M; i++) { Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1])); minDelta = MIN(minDelta, MAX(Delta,0)); } return(minDelta); } /* A gettimeofday routine to give access to the wall clock timer on most UNIX-like systems. 
*/ #include <sys/time.h> double mysecond() { struct timeval tp; struct timezone tzp; int i; i = gettimeofday(&tp,&tzp); return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); } #ifndef abs #define abs(a) ((a) >= 0 ? (a) : -(a)) #endif void checkSTREAMresults () { STREAM_TYPE aj,bj,cj,scalar; STREAM_TYPE aSumErr,bSumErr,cSumErr; STREAM_TYPE aAvgErr,bAvgErr,cAvgErr; double epsilon; ssize_t j; int k,ierr,err; /* reproduce initialization */ aj = 1.0; bj = 2.0; cj = 0.0; /* a[] is modified during timing check */ aj = 2.0E0 * aj; /* now execute timing loop */ scalar = 3.0; for (k=0; k<NTIMES; k++) { cj = aj; bj = scalar*cj; cj = aj+bj; aj = bj+scalar*cj; } /* accumulate deltas between observed and expected results */ aSumErr = 0.0; bSumErr = 0.0; cSumErr = 0.0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { aSumErr += abs(a[j] - aj); bSumErr += abs(b[j] - bj); cSumErr += abs(c[j] - cj); // if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN } aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE; bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE; cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE; if (sizeof(STREAM_TYPE) == 4) { epsilon = 1.e-6; } else if (sizeof(STREAM_TYPE) == 8) { epsilon = 1.e-13; } else { printf("WEIRD: sizeof(STREAM_TYPE) = %d\n",sizeof(STREAM_TYPE)); epsilon = 1.e-6; } err = 0; if (abs(aAvgErr/aj) > epsilon) { err++; printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon); printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj); ierr = 0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { if (abs(a[j]/aj-1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,aj,a[j],abs((aj-a[j])/aAvgErr)); } #endif } } printf(" For array a[], %d errors were found.\n",ierr); } if (abs(bAvgErr/bj) > epsilon) { err++; printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon); printf (" 
Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj); printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon); ierr = 0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { if (abs(b[j]/bj-1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,bj,b[j],abs((bj-b[j])/bAvgErr)); } #endif } } printf(" For array b[], %d errors were found.\n",ierr); } if (abs(cAvgErr/cj) > epsilon) { err++; printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon); printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj); printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon); ierr = 0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { if (abs(c[j]/cj-1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,cj,c[j],abs((cj-c[j])/cAvgErr)); } #endif } } printf(" For array c[], %d errors were found.\n",ierr); } if (err == 0) { printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon); } #ifdef VERBOSE printf ("Results Validation Verbose Results: \n"); printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj); printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]); printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj)); #endif } #ifdef TUNED /* stubs for "tuned" versions of the kernels */ void tuned_STREAM_Copy() { ssize_t j; #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]; } void tuned_STREAM_Scale(STREAM_TYPE scalar) { ssize_t j; #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) b[j] = scalar*c[j]; } void tuned_STREAM_Add() { ssize_t j; #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]+b[j]; } void tuned_STREAM_Triad(STREAM_TYPE scalar) { ssize_t j; #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) a[j] = b[j]+scalar*c[j]; } 
/* end of stubs for the "tuned" versions of the kernels */ #endif
FEMTree.h
/* Copyright (c) 2006, Michael Kazhdan and Matthew Bolitho All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the Johns Hopkins University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // -- [TODO] Make as many of the functions (related to the solver) const as possible. 
// -- [TODO] Move the point interpolation constraint scaling by 1<<maxDepth // -- [TODO] Add support for staggered-grid test functions // -- [TODO] Store signatures with constraints/systems/restriction-prolongations // -- [TODO] Make a virtual evaluation that only needs to know the degree // -- [TODO] Modify (public) functions so that template parameters don't need to be passed when they are called // -- [TODO] Confirm that whenever _isValidFEM*Node is called, the flags have already been set. // -- [TODO] Make weight evaluation more efficient in _getSamplesPerNode by reducing the number of calls to getNeighbors // -- [TODO] For point evaluation: // 1. Have the evaluator store stencils for all depths [DONE] // 2. When testing centers/corners, don't use generic evaluation #ifndef FEM_TREE_INCLUDED #define FEM_TREE_INCLUDED #define VERSION "10.05" #define MEMORY_ALLOCATOR_BLOCK_SIZE 1<<12 #define NEW_CODE #include <atomic> #include "MyMiscellany.h" #include "BSplineData.h" #include "Geometry.h" #include "PointStream.h" #include "RegularTree.h" #include "SparseMatrix.h" #include <functional> template< unsigned int Dim , class Real > class FEMTree; enum { SHOW_GLOBAL_RESIDUAL_NONE , SHOW_GLOBAL_RESIDUAL_LAST , SHOW_GLOBAL_RESIDUAL_ALL , SHOW_GLOBAL_RESIDUAL_COUNT }; const char* ShowGlobalResidualNames[] = { "show none" , "show last" , "show all" }; class FEMTreeNodeData { public: enum { SPACE_FLAG = 1 , FEM_FLAG_1 = 2 , FEM_FLAG_2 = 4 , REFINABLE_FLAG = 8 , GHOST_FLAG = 1<<7 }; int nodeIndex; mutable char flags; void setGhostFlag( bool f ) const { if( f ) flags |= GHOST_FLAG ; else flags &= ~GHOST_FLAG; } bool getGhostFlag( void ) const { return ( flags & GHOST_FLAG )!=0; } FEMTreeNodeData( void ); ~FEMTreeNodeData( void ); }; template< unsigned int Dim > class SortedTreeNodes { typedef RegularTreeNode< Dim , FEMTreeNodeData > TreeNode; protected: Pointer( Pointer( int ) ) _sliceStart; int _levels; public: Pointer( TreeNode* ) treeNodes; int begin( int depth ) const { 
return _sliceStart[depth][0]; } int end( int depth ) const { return _sliceStart[depth][(size_t)1<<depth]; } int begin( int depth , int slice ) const { return _sliceStart[depth][ slice<0 ? 0 : ( slice>(1<<depth) ? (1<<depth) : slice ) ]; } int end( int depth , int slice ) const { return begin( depth , slice+1 ); } int size( void ) const { return _sliceStart[_levels-1][(size_t)1<<(_levels-1)]; } int size( int depth ) const { if(depth<0||depth>=_levels) printf( "uhoh\n" ); return _sliceStart[depth][(size_t)1<<depth] - _sliceStart[depth][0]; } int size( int depth , int slice ) const { return end( depth , slice ) - begin( depth , slice ); } int levels( void ) const { return _levels; } SortedTreeNodes( void ); ~SortedTreeNodes( void ); void set( TreeNode& root , std::vector< int >* map ); size_t set( TreeNode& root ); }; template< typename T > struct DotFunctor{}; template< > struct DotFunctor< float > { double operator()( float v1 , float v2 ){ return v1*v2; } unsigned int dimension( void ) const { return 1; } }; template< > struct DotFunctor< double > { double operator()( double v1 , double v2 ){ return v1*v2; } unsigned int dimension( void ) const { return 1; } }; template< class Real , unsigned int Dim > struct DotFunctor< Point< Real , Dim > > { double operator()( Point< Real , Dim > v1 , Point< Real , Dim > v2 ){ return Point< Real , Dim >::Dot( v1 , v2 ); } unsigned int dimension( void ) const { return Dim; } }; template< typename Pack > struct SupportKey{ }; template< unsigned int ... Degrees > struct SupportKey< UIntPack< Degrees ... > > : public RegularTreeNode< sizeof...(Degrees) , FEMTreeNodeData >::template NeighborKey< UIntPack< (-BSplineSupportSizes< Degrees >::SupportStart) ... > , UIntPack< BSplineSupportSizes< Degrees >::SupportEnd ... > > { typedef UIntPack< (-BSplineSupportSizes< Degrees >::SupportStart ) ... > LeftRadii; typedef UIntPack< ( BSplineSupportSizes< Degrees >::SupportEnd ) ... 
> RightRadii; typedef UIntPack< ( BSplineSupportSizes< Degrees >::SupportSize ) ... > Sizes; }; template< typename Pack > struct ConstSupportKey{ }; template< unsigned int ... Degrees > struct ConstSupportKey< UIntPack< Degrees ... > > : public RegularTreeNode< sizeof...(Degrees) , FEMTreeNodeData >::template ConstNeighborKey< UIntPack< (-BSplineSupportSizes< Degrees >::SupportStart ) ... > , UIntPack< BSplineSupportSizes< Degrees >::SupportEnd ... > > { typedef UIntPack< (-BSplineSupportSizes< Degrees >::SupportStart ) ... > LeftRadii; typedef UIntPack< ( BSplineSupportSizes< Degrees >::SupportEnd ) ... > RightRadii; typedef UIntPack< ( BSplineSupportSizes< Degrees >::SupportSize ) ... > Sizes; }; template< typename Pack > struct OverlapKey{ }; template< unsigned int ... Degrees > struct OverlapKey< UIntPack< Degrees ... > > : public RegularTreeNode< sizeof...(Degrees) , FEMTreeNodeData >::template NeighborKey< UIntPack< (-BSplineOverlapSizes< Degrees , Degrees >::OverlapStart ) ... > , UIntPack< BSplineOverlapSizes< Degrees , Degrees >::OverlapEnd ... > > { typedef UIntPack< (-BSplineOverlapSizes< Degrees , Degrees >::OverlapStart ) ... > LeftRadii; typedef UIntPack< ( BSplineOverlapSizes< Degrees , Degrees >::OverlapEnd ) ... > RightRadii; typedef UIntPack< ( BSplineOverlapSizes< Degrees , Degrees >::OverlapSize ) ... > Sizes; }; template< typename Pack > struct ConstOverlapKey{ }; template< unsigned int ... Degrees > struct ConstOverlapKey< UIntPack< Degrees ... > > : public RegularTreeNode< sizeof...(Degrees) , FEMTreeNodeData >::template ConstNeighborKey< UIntPack< (-BSplineOverlapSizes< Degrees , Degrees >::OverlapStart ) ... > , UIntPack< BSplineOverlapSizes< Degrees , Degrees >::OverlapEnd ... > > { typedef UIntPack< (-BSplineOverlapSizes< Degrees , Degrees >::OverlapStart ) ... > LeftRadii; typedef UIntPack< ( BSplineOverlapSizes< Degrees , Degrees >::OverlapEnd ) ... 
> RightRadii; typedef UIntPack< ( BSplineOverlapSizes< Degrees , Degrees >::OverlapSize ) ... > Sizes; }; template< typename Pack > struct PointSupportKey{ }; template< unsigned int ... Degrees > struct PointSupportKey< UIntPack< Degrees ... > > : public RegularTreeNode< sizeof...(Degrees) , FEMTreeNodeData >::template NeighborKey< UIntPack< BSplineSupportSizes< Degrees >::SupportEnd ... > , UIntPack< (-BSplineSupportSizes< Degrees >::SupportStart ) ... > > { typedef UIntPack< ( BSplineSupportSizes< Degrees >::SupportEnd ) ... > LeftRadii; typedef UIntPack< (-BSplineSupportSizes< Degrees >::SupportStart ) ... > RightRadii; typedef UIntPack< ( BSplineSupportSizes< Degrees >::SupportEnd - BSplineSupportSizes< Degrees >::SupportStart + 1 ) ... > Sizes; }; template< typename Pack > struct ConstPointSupportKey{ }; template< unsigned int ... Degrees > struct ConstPointSupportKey< UIntPack< Degrees ... > > : public RegularTreeNode< sizeof...(Degrees) , FEMTreeNodeData >::template ConstNeighborKey< UIntPack< BSplineSupportSizes< Degrees >::SupportEnd ... > , UIntPack< (-BSplineSupportSizes< Degrees >::SupportStart ) ... > > { typedef UIntPack< ( BSplineSupportSizes< Degrees >::SupportEnd ) ... > LeftRadii; typedef UIntPack< (-BSplineSupportSizes< Degrees >::SupportStart ) ... > RightRadii; typedef UIntPack< ( BSplineSupportSizes< Degrees >::SupportEnd - BSplineSupportSizes< Degrees >::SupportStart + 1 ) ... > Sizes; }; template< typename Pack > struct CornerSupportKey{ }; template< unsigned int ... Degrees > struct CornerSupportKey< UIntPack< Degrees ... > > : public RegularTreeNode< sizeof...(Degrees) , FEMTreeNodeData >::template NeighborKey< UIntPack< BSplineSupportSizes< Degrees >::BCornerEnd ... > , UIntPack< ( -BSplineSupportSizes< Degrees >::BCornerStart + 1 ) ... > > { typedef UIntPack< ( BSplineSupportSizes< Degrees >::BCornerEnd ) ... > LeftRadii; typedef UIntPack< (-BSplineSupportSizes< Degrees >::BCornerStart + 1 ) ... 
> RightRadii; typedef UIntPack< ( BSplineSupportSizes< Degrees >::BCornerSize + 1 ) ... > Sizes; }; template< typename Pack > struct ConstCornerSupportKey{ }; template< unsigned int ... Degrees > struct ConstCornerSupportKey< UIntPack< Degrees ... > > : public RegularTreeNode< sizeof...(Degrees) , FEMTreeNodeData >::template ConstNeighborKey< UIntPack< BSplineSupportSizes< Degrees >::BCornerEnd ... > , UIntPack< ( -BSplineSupportSizes< Degrees >::BCornerStart + 1 ) ... > > { typedef UIntPack< ( BSplineSupportSizes< Degrees >::BCornerEnd ) ... > LeftRadii; typedef UIntPack< (-BSplineSupportSizes< Degrees >::BCornerStart + 1 ) ... > RightRadii; typedef UIntPack< ( BSplineSupportSizes< Degrees >::BCornerSize + 1 ) ... > Sizes; }; // This represents a vector that can only grow in size. // It has the property that once a reference to an element is returned, that reference remains valid until the vector is destroyed. template< typename T , unsigned int LogBlockSize=10 , unsigned InitialBlocks=10 , unsigned int AllocationMultiplier=2 > struct BlockedVector { BlockedVector( T defaultValue=T() ) : _defaultValue( defaultValue ) { _reservedBlocks = InitialBlocks; _blocks = NewPointer< Pointer( T ) >( _reservedBlocks ); for( size_t i=0 ; i<_reservedBlocks ; i++ ) _blocks[i] = NullPointer( Pointer( T ) ); _allocatedBlocks = _size = 0; } ~BlockedVector( void ) { for( size_t i=0 ; i<_allocatedBlocks ; i++ ) DeletePointer( _blocks[i] ); DeletePointer( _blocks ); } BlockedVector( const BlockedVector& v ) { _reservedBlocks = v._reservedBlocks , _allocatedBlocks = v._allocatedBlocks , _size = v._size , _defaultValue = v._defaultValue; _blocks = NewPointer< Pointer( T ) >( _reservedBlocks ); for( size_t i=0 ; i<_allocatedBlocks ; i++ ) { _blocks[i] = NewPointer< T >( _BlockSize ); memcpy( _blocks[i] , v._blocks[i] , sizeof(T)*_BlockSize ); } for( size_t i=_allocatedBlocks ; i<_reservedBlocks ; i++ ) _blocks[i] = NullPointer( Pointer ( T ) ); } BlockedVector& operator = ( const 
BlockedVector& v ) { for( size_t i=0 ; i<_allocatedBlocks ; i++ ) DeletePointer( _blocks[i] ); DeletePointer( _blocks ); _reservedBlocks = v._reservedBlocks , _blocks = v._blocks , _allocatedBlocks = v._allocatedBlocks , _size = v._size , _defaultValue = v._defaultValue; _blocks = NewPointer< Pointer( T ) >( _reservedBlocks ); for( size_t i=0 ; i<_allocatedBlocks ; i++ ) { _blocks[i] = NewPointer< T >( _BlockSize ); memcpy( _blocks[i] , v._blocks[i] , sizeof(T)*_BlockSize ); } for( size_t i=_allocatedBlocks ; i<_reservedBlocks ; i++ ) _blocks[i] = NullPointer( Pointer ( T ) ); return *this; } BlockedVector( BlockedVector&& v ) { _reservedBlocks = v._reservedBlocks , _allocatedBlocks = v._allocatedBlocks , _size = v._size , _defaultValue = v._defaultValue , _blocks = v._blocks; v._reservedBlocks = v._allocatedBlocks = v._size = 0 , v._blocks = NullPointer( Pointer( T ) ); } BlockedVector& operator = ( BlockedVector&& v ) { for( size_t i=0 ; i<_allocatedBlocks ; i++ ) DeletePointer( _blocks[i] ); DeletePointer( _blocks ); _reservedBlocks = v._reservedBlocks , _allocatedBlocks = v._allocatedBlocks , _size = v._size , _defaultValue = v._defaultValue , _blocks = v._blocks; v._reservedBlocks = v._allocatedBlocks = v._size = 0 , v._blocks = NullPointer( Pointer( T ) ); return *this; } size_t size( void ) const { return _size; } const T& operator[]( size_t idx ) const { return _blocks[idx>>LogBlockSize][idx&_Mask]; } T& operator[]( size_t idx ){ return _blocks[idx>>LogBlockSize][idx&_Mask]; } size_t resize( size_t size ){ return resize( size , _defaultValue ); } size_t resize( size_t size , const T& defaultValue ) { if( size<=_size ) { #ifdef _MSC_VER WARN( "BlockedVector::resize: new size must be greater than old size: %llu > %llu" , size , _size ); #else // !MSC_VER WARN( "BlockedVector::resize: new size must be greater than old size: %lu > %lu" , size , _size ); #endif // _MSC_VER return _size; } size_t index = size-1; size_t block = index >> LogBlockSize; size_t 
blockIndex = index & _Mask; // If there are insufficiently many blocks if( block>=_reservedBlocks ) { size_t newReservedSize = std::max< size_t >( _reservedBlocks * AllocationMultiplier , block+1 ); Pointer( Pointer( T ) ) __blocks = NewPointer< Pointer( T ) >( newReservedSize ); memcpy( __blocks , _blocks , sizeof( Pointer( T ) ) * _reservedBlocks ); for( size_t i=_reservedBlocks ; i<newReservedSize ; i++ ) __blocks[i] = NullPointer( Pointer( T ) ); Pointer( Pointer( T ) ) _oldBlocks = _blocks; _blocks = __blocks; _reservedBlocks = newReservedSize; DeletePointer( _oldBlocks ); } // If the block hasn't been allocated if( block>=_allocatedBlocks ) { for( size_t b=_allocatedBlocks ; b<=block ; b++ ) { _blocks[b] = NewPointer< T >( _BlockSize ); for( size_t i=0 ; i<_BlockSize ; i++ ) _blocks[b][i] = defaultValue; } _allocatedBlocks = block+1; } _size = index+1; return index; } size_t push( void ){ return resize( _size+1 ); } protected: static const size_t _BlockSize = 1<<LogBlockSize; static const size_t _Mask = (1<<LogBlockSize)-1; T _defaultValue; size_t _allocatedBlocks , _reservedBlocks; size_t _size; Pointer( Pointer( T ) ) _blocks; }; template< class Data , typename Pack > struct _SparseOrDenseNodeData{}; template< class Data , unsigned int ... FEMSigs > struct _SparseOrDenseNodeData< Data , UIntPack< FEMSigs ... > > { static const unsigned int Dim = sizeof ... ( FEMSigs ); typedef UIntPack< FEMSigs ... > FEMSignatures; typedef Data data_type; virtual size_t size( void ) const = 0; virtual const Data& operator[] ( int idx ) const = 0; virtual Data& operator[] ( int idx ) = 0; virtual Data& operator[]( const RegularTreeNode< Dim , FEMTreeNodeData >* node ) = 0; virtual Data* operator()( const RegularTreeNode< Dim , FEMTreeNodeData >* node ) = 0; virtual const Data* operator()( const RegularTreeNode< Dim , FEMTreeNodeData >* node ) const = 0; }; template< class Data , typename Pack > struct SparseNodeData{}; template< class Data , unsigned int ... 
FEMSigs > struct SparseNodeData< Data , UIntPack< FEMSigs ... > > : public _SparseOrDenseNodeData< Data , UIntPack< FEMSigs ... > > { static const unsigned int Dim = sizeof ... ( FEMSigs ); size_t size( void ) const { return _data.size(); } const Data& operator[] ( int idx ) const { return _data[idx]; } Data& operator[] ( int idx ) { return _data[idx]; } void reserve( size_t sz ){ if( sz>_indices.size() ) _indices.resize( sz , -1 ); } Data* operator()( const RegularTreeNode< Dim , FEMTreeNodeData >* node ){ return ( node->nodeData.nodeIndex<0 || node->nodeData.nodeIndex>=(int)_indices.size() || _indices[ node->nodeData.nodeIndex ]<0 ) ? NULL : &_data[ _indices[ node->nodeData.nodeIndex ] ]; } const Data* operator()( const RegularTreeNode< Dim , FEMTreeNodeData >* node ) const { return ( node->nodeData.nodeIndex<0 || node->nodeData.nodeIndex>=(int)_indices.size() || _indices[ node->nodeData.nodeIndex ]<0 ) ? NULL : &_data[ _indices[ node->nodeData.nodeIndex ] ]; } Data& operator[]( const RegularTreeNode< Dim , FEMTreeNodeData >* node ) { // If the node hasn't been indexed yet if( node->nodeData.nodeIndex>=(int)_indices.size() ) #pragma omp critical( SparseNodeData__operator ) if( node->nodeData.nodeIndex>=(int)_indices.size() ) _indices.resize( node->nodeData.nodeIndex+1 , -1 ); // If the node hasn't been allocated yet if( _indices[ node->nodeData.nodeIndex ]==-1 ) #pragma omp critical( SparseNodeData__operator ) if( _indices[ node->nodeData.nodeIndex ]==-1 ) _indices[ node->nodeData.nodeIndex ] = (int)_data.push(); return _data[ _indices[ node->nodeData.nodeIndex ] ]; } int index( const RegularTreeNode< Dim , FEMTreeNodeData >* node ) const { if( !node || node->nodeData.nodeIndex<0 || node->nodeData.nodeIndex>=(int)_indices.size() ) return -1; else return _indices[ node->nodeData.nodeIndex ]; } protected: template< unsigned int _Dim , class _Real > friend class FEMTree; // Map should be the size of the old number of entries and map[i] should give the new index of 
the old i-th node void _remapIndices( const int* newNodeIndices , unsigned int newNodeCount ) { BlockedVector< int > newIndices; newIndices.resize( newNodeCount ); for( int i=0 ; i<(int)newNodeCount ; i++ ) newIndices[i] = -1; for( size_t i=0 ; i<(int)_indices.size() ; i++ ) if( newNodeIndices[i]>=0 && newNodeIndices[i]<(int)newNodeCount ) newIndices[ newNodeIndices[i] ] = _indices[i]; _indices = newIndices; } BlockedVector< int > _indices; BlockedVector< Data > _data; }; template< class Data , typename Pack > struct DenseNodeData{}; template< class Data , unsigned int ... FEMSigs > struct DenseNodeData< Data , UIntPack< FEMSigs ... > > : public _SparseOrDenseNodeData< Data , UIntPack< FEMSigs ... > > { static const unsigned int Dim = sizeof ... ( FEMSigs ); DenseNodeData( void ) { _data = NullPointer( Data ) ; _sz = 0; } DenseNodeData( size_t sz ){ _sz = sz ; if( sz ) _data = NewPointer< Data >( sz ) ; else _data = NullPointer( Data ); } DenseNodeData( const DenseNodeData& d ) : DenseNodeData() { _resize( d._sz ) ; if( _sz ) memcpy( _data , d._data , sizeof(Data) * _sz ); } DenseNodeData( DenseNodeData&& d ){ _data = d._data , _sz = d._sz ; d._data = NullPointer( Data ) , d._sz = 0; } DenseNodeData& operator = ( const DenseNodeData& d ){ _resize( d._sz ) ; if( _sz ) memcpy( _data , d._data , sizeof(Data) * _sz ) ; return *this; } DenseNodeData& operator = ( DenseNodeData&& d ){ size_t __sz = _sz ; Pointer( Data ) __data = _data ; _data = d._data , _sz = d._sz ; d._data = __data , d._sz = __sz ; return *this; } ~DenseNodeData( void ){ DeletePointer( _data ) ; _sz = 0; } static void WriteSignatures( FILE* fp ) { unsigned int dim = sizeof ... ( FEMSigs ); fwrite( &dim , sizeof(unsigned int) , 1 , fp ); unsigned int femSigs[] = { FEMSigs ... 
}; fwrite( femSigs , sizeof(unsigned int) , dim , fp ); } void write( FILE* fp ) const { fwrite( &_sz , sizeof(size_t) , 1 , fp ) ; fwrite( _data , sizeof(Data) , _sz , fp ); } void read( FILE* fp ) { if( fread( &_sz , sizeof(size_t) , 1 , fp )!=1 ) ERROR_OUT( "Failed to read size" ); _data = NewPointer< Data >( _sz ); if( fread ( _data , sizeof(Data) , _sz , fp )!=_sz ) ERROR_OUT( "failed to read data" ); } Data& operator[] ( int idx ) { return _data[idx]; } const Data& operator[] ( int idx ) const { return _data[idx]; } size_t size( void ) const { return _sz; } Data& operator[]( const RegularTreeNode< Dim , FEMTreeNodeData >* node ) { return _data[ node->nodeData.nodeIndex ]; } Data* operator()( const RegularTreeNode< Dim , FEMTreeNodeData >* node ) { return ( node==NULL || node->nodeData.nodeIndex>=(int)_sz ) ? NULL : &_data[ node->nodeData.nodeIndex ]; } const Data* operator()( const RegularTreeNode< Dim , FEMTreeNodeData >* node ) const { return ( node==NULL || node->nodeData.nodeIndex>=(int)_sz ) ? NULL : &_data[ node->nodeData.nodeIndex ]; } int index( const RegularTreeNode< Dim , FEMTreeNodeData >* node ) const { return ( !node || node->nodeData.nodeIndex<0 || node->nodeData.nodeIndex>=(int)this->_data.size() ) ? 
-1 : node->nodeData.nodeIndex; } Pointer( Data ) operator()( void ) { return _data; } ConstPointer( Data ) operator()( void ) const { return ( ConstPointer( Data ) )_data; } protected: template< unsigned int _Dim , class _Real > friend class FEMTree; // Map should be the size of the old number of entries and map[i] should give the new index of the old i-th node void _remapIndices( const int* newNodeIndices , size_t newNodeCount ) { Pointer( Data ) newData = NewPointer< Data >( newNodeCount ); memset( newData , 0 , sizeof(Data)*newNodeCount ); for( size_t i=0 ; i<_sz ; i++ ) if( newNodeIndices[i]>=0 && newNodeIndices[i]<newNodeCount ) newData[ newNodeIndices[i] ] = _data[i]; DeletePointer( _data ); _data = newData; _sz = newNodeCount; } size_t _sz; void _resize( size_t sz ){ DeletePointer( _data ) ; if( sz ) _data = NewPointer< Data >( sz ) ; else _data = NullPointer( Data ) ; _sz = sz; } Pointer( Data ) _data; }; enum FEMTreeRealType { FEM_TREE_REAL_FLOAT , FEM_TREE_REAL_DOUBLE , FEM_TREE_REAL_COUNT }; const char* FEMTreeRealNames[] = { "float" , "double" }; void ReadFEMTreeParameter( FILE* fp , FEMTreeRealType& realType , int &dimension ) { if( fread( &realType , sizeof(FEMTreeRealType) , 1 , fp )!=1 ) ERROR_OUT( "Failed to read real type" ); if( fread( &dimension , sizeof(int) , 1 , fp )!=1 ) ERROR_OUT( "Failed to read dimension" ); } unsigned int* ReadDenseNodeDataSignatures( FILE* fp , unsigned int &dim ) { if( fread( &dim , sizeof(unsigned int) , 1 , fp )!=1 ) ERROR_OUT( "Failed to read dimension" ); unsigned int* femSigs = new unsigned int[dim]; if( fread( femSigs , sizeof(unsigned int) , dim , fp )!=dim ) ERROR_OUT( "Failed to read signatures" ); return femSigs; } // The Derivative method needs static members: // Dim: the dimensionality of the space in which derivatives are evaluated // Size: the total number of derivatives // and static methods: // Index: takes the number of partials along each dimension and returns the index // Factor: takes an index and 
sets the number of partials along each dimension template< typename T > struct TensorDerivatives{ }; template< class Real , typename T > struct TensorDerivativeValues{ }; // Specify the derivatives for each dimension separately template< unsigned int D , unsigned int ... Ds > struct TensorDerivatives< UIntPack< D , Ds ... > > { typedef TensorDerivatives< UIntPack< Ds ... > > _TensorDerivatives; static const int LastDerivative = UIntPack< D , Ds ... >::template Get< sizeof ... (Ds) >(); static const int Dim = _TensorDerivatives::Dim + 1; static const unsigned int Size = _TensorDerivatives::Size * ( D+1 ); static void Factor( unsigned int idx , unsigned int derivatives[Dim] ){ derivatives[0] = idx / _TensorDerivatives::Size ; _TensorDerivatives::Factor( idx % _TensorDerivatives::Size , derivatives+1 ); } static unsigned int Index( const unsigned int derivatives[Dim] ){ return _TensorDerivatives::Index( derivatives + 1 ) + _TensorDerivatives::Size * derivatives[0]; } }; template< unsigned int D > struct TensorDerivatives< UIntPack< D > > { static const int LastDerivative = D; static const int Dim = 1; static const unsigned int Size = D+1; static void Factor( unsigned int idx , unsigned int derivatives[1] ){ derivatives[0] = idx; } static unsigned int Index( const unsigned int derivatives[1] ){ return derivatives[0]; } }; template< class Real , unsigned int ... Ds > struct TensorDerivativeValues< Real , UIntPack< Ds ... > > : public Point< Real , TensorDerivatives< UIntPack< Ds ... 
> >::Size >{ }; // Specify the sum of the derivatives template< unsigned int Dim , unsigned int D > struct CumulativeDerivatives { typedef CumulativeDerivatives< Dim , D-1 > _CumulativeDerivatives; static const int LastDerivative = D; static const unsigned int Size = _CumulativeDerivatives::Size * Dim + 1; static void Factor( unsigned int idx , unsigned int d[Dim] ) { if( idx<_CumulativeDerivatives::Size ) return _CumulativeDerivatives::Factor( idx , d ); else _Factor( idx - _CumulativeDerivatives::Size , d ); } static unsigned int Index( const unsigned int derivatives[Dim] ) { int dCount = 0; for( int d=0 ; d<Dim ; d++ ) dCount += derivatives[d]; if( dCount>=D ) ERROR_OUT( "More derivatives than allowed" ); else if( dCount<D ) return _CumulativeDerivatives::Index( derivatives ); else return _CumulativeDerivatives::Size + _Index( derivatives ); } protected: static const unsigned int _Size = _CumulativeDerivatives::_Size * Dim; static void _Factor( unsigned int idx , unsigned int d[Dim] ) { _CumulativeDerivatives::_Factor( idx % _CumulativeDerivatives::_Size , d ); d[ idx / _CumulativeDerivatives::_Size ]++; } static unsigned int _Index( const unsigned int d[Dim] ) { unsigned int _d[Dim]; memcpy( _d , d , sizeof(_d) ); for( int i=0 ; i<Dim ; i++ ) if( _d[i] ) { _d[i]--; return _CumulativeDerivatives::Index( _d ) * Dim + i; } ERROR_OUT( "No derivatives specified" ); return -1; } friend CumulativeDerivatives< Dim , D+1 >; }; template< unsigned int Dim > struct CumulativeDerivatives< Dim , 0 > { static const int LastDerivative = 0; static const unsigned int Size = 1; static void Factor( unsigned int idx , unsigned int d[Dim] ){ memset( d , 0 , sizeof(unsigned int)*Dim ); } static unsigned int Index( const unsigned int derivatives[Dim] ){ return 0; } protected: static const unsigned int _Size = 1; static void _Factor( unsigned int idx , unsigned int d[Dim] ){ memset( d , 0 , sizeof(unsigned int)*Dim ); } friend CumulativeDerivatives< Dim , 1 >; }; template< typename 
Real , unsigned int Dim , unsigned int D > using CumulativeDerivativeValues = Point< Real , CumulativeDerivatives< Dim , D >::Size >; template< unsigned int Dim , class Real , unsigned int D > CumulativeDerivativeValues< Real , Dim , D > Evaluate( const double dValues[Dim][D+1] ) { CumulativeDerivativeValues< Real , Dim , D > v; unsigned int _d[Dim]; for( int d=0 ; d<CumulativeDerivatives< Dim , D >::Size ; d++ ) { CumulativeDerivatives< Dim , D >::Factor( d , _d ); double value = dValues[0][ _d[0] ]; for( int dd=1 ; dd<Dim ; dd++ ) value *= dValues[dd][ _d[dd] ]; v[d] = (Real)value; } return v; } template< unsigned int Dim , class Real , typename T , unsigned int D > struct DualPointInfo { Point< Real , Dim > position; Real weight; CumulativeDerivativeValues< T , Dim , D > dualValues; DualPointInfo operator + ( const DualPointInfo& p ) const { return DualPointInfo( position + p.position , dualValues + p.dualValues , weight + p.weight ); } DualPointInfo& operator += ( const DualPointInfo& p ){ position += p.position ; weight += p.weight , dualValues += p.dualValues ; return *this; } DualPointInfo operator * ( Real s ) const { return DualPointInfo( position*s , weight*s , dualValues*s ); } DualPointInfo& operator *= ( Real s ){ position *= s , weight *= s , dualValues *= s ; return *this; } DualPointInfo operator / ( Real s ) const { return DualPointInfo( position/s , weight/s , dualValues/s ); } DualPointInfo& operator /= ( Real s ){ position /= s , weight /= s , dualValues /= s ; return *this; } DualPointInfo( void ) : weight(0) { } DualPointInfo( Point< Real , Dim > p , CumulativeDerivativeValues< T , Dim , D > c , Real w ) { position = p , dualValues = c , weight = w; } }; template< unsigned int Dim , class Real , typename Data , typename T , unsigned int D > struct DualPointAndDataInfo { DualPointInfo< Dim , Real , T , D > pointInfo; Data data; DualPointAndDataInfo operator + ( const DualPointAndDataInfo& p ) const { return DualPointAndDataInfo( pointInfo + 
p.pointInfo , data + p.data ); } DualPointAndDataInfo operator * ( Real s ) const { return DualPointAndDataInfo( pointInfo * s , data * s ); } DualPointAndDataInfo operator / ( Real s ) const { return DualPointAndDataInfo( pointInfo / s , data / s ); } DualPointAndDataInfo& operator += ( const DualPointAndDataInfo& p ){ pointInfo += p.pointInfo ; data += p.data ; return *this; } DualPointAndDataInfo& operator *= ( Real s ) { pointInfo *= s , data *= s ; return *this; } DualPointAndDataInfo& operator /= ( Real s ) { pointInfo /= s , data /= s ; return *this; } DualPointAndDataInfo( void ){ } DualPointAndDataInfo( DualPointInfo< Dim , Real , T , D > p , Data d ) { pointInfo = p , data = d; } }; template< unsigned int Dim , class Real , typename T , unsigned int D > struct DualPointInfoBrood { DualPointInfo< Dim , Real , T , D >& operator[]( size_t idx ){ return _dpInfo[idx]; } const DualPointInfo< Dim , Real , T , D >& operator[]( size_t idx ) const { return _dpInfo[idx]; } void finalize( void ){ _size = 0 ; for( int i=0 ; i<(1<<Dim) ; i++ ) if( _dpInfo[i].weight>0 ) _dpInfo[_size++] = _dpInfo[i]; } unsigned int size( void ) const { return _size; } DualPointInfoBrood operator + ( const DualPointInfoBrood& p ) const { DualPointInfoBrood d ; for( int i=0 ; i<(1<<Dim) ; i++ ) d._dpInfo[i] = _dpInfo[i] + p._dpInfo[i] ; return d; } DualPointInfoBrood operator * ( Real s ) const { DualPointInfoBrood d ; for( int i=0 ; i<(1<<Dim) ; i++ ) d._dpInfo[i] = _dpInfo[i] * s ; return d; } DualPointInfoBrood operator / ( Real s ) const { DualPointInfoBrood d ; for( int i=0 ; i<(1<<Dim) ; i++ ) d._dpInfo[i] = _dpInfo[i] / s ; return d; } DualPointInfoBrood& operator += ( const DualPointInfoBrood& p ){ for( int i=0 ; i<(1<<Dim) ; i++ ) _dpInfo[i] += p._dpInfo[i] ; return *this; } DualPointInfoBrood& operator *= ( Real s ) { for( int i=0 ; i<(1<<Dim) ; i++ ) _dpInfo[i] *= s ; return *this; } DualPointInfoBrood& operator /= ( Real s ) { for( int i=0 ; i<(1<<Dim) ; i++ ) _dpInfo[i] /= s 
; return *this; } protected: DualPointInfo< Dim , Real , T , D > _dpInfo[1<<Dim]; unsigned int _size; }; template< unsigned int Dim , class Real , typename Data , typename T , unsigned int D > struct DualPointAndDataInfoBrood { DualPointAndDataInfo< Dim , Real , Data , T , D >& operator[]( size_t idx ){ return _dpInfo[idx]; } const DualPointAndDataInfo< Dim , Real , Data , T , D >& operator[]( size_t idx ) const { return _dpInfo[idx]; } void finalize( void ){ _size = 0 ; for( int i=0 ; i<(1<<Dim) ; i++ ) if( _dpInfo[i].pointInfo.weight>0 ) _dpInfo[_size++] = _dpInfo[i]; } unsigned int size( void ) const { return _size; } DualPointAndDataInfoBrood operator + ( const DualPointAndDataInfoBrood& p ) const { DualPointAndDataInfoBrood d ; for( int i=0 ; i<(1<<Dim) ; i++ ) d._dpInfo[i] = _dpInfo[i] + p._dpInfo[i] ; return d; } DualPointAndDataInfoBrood operator * ( Real s ) const { DualPointAndDataInfoBrood d ; for( int i=0 ; i<(1<<Dim) ; i++ ) d._dpInfo[i] = _dpInfo[i] * s ; return d; } DualPointAndDataInfoBrood operator / ( Real s ) const { DualPointAndDataInfoBrood d ; for( int i=0 ; i<(1<<Dim) ; i++ ) d._dpInfo[i] = _dpInfo[i] / s ; return d; } DualPointAndDataInfoBrood& operator += ( const DualPointAndDataInfoBrood& p ){ for( int i=0 ; i<(1<<Dim) ; i++ ) _dpInfo[i] += p._dpInfo[i] ; return *this; } DualPointAndDataInfoBrood& operator *= ( Real s ) { for( int i=0 ; i<(1<<Dim) ; i++ ) _dpInfo[i] *= s ; return *this; } DualPointAndDataInfoBrood& operator /= ( Real s ) { for( int i=0 ; i<(1<<Dim) ; i++ ) _dpInfo[i] /= s ; return *this; } protected: DualPointAndDataInfo< Dim , Real , Data , T , D > _dpInfo[1<<Dim]; unsigned int _size; }; //////////////////////////// // The virtual integrator // //////////////////////////// struct BaseFEMIntegrator { template< typename TDegreePack > struct System{}; template< typename TDegreePack > struct RestrictionProlongation{}; template< typename TDegreePack , typename CDegreePack , unsigned int CDim > struct Constraint{}; template< 
typename TDegreePack > struct SystemConstraint{};
	template< typename TDegreePack > struct PointEvaluator{};
protected:
	// Per-dimension recursions over the degree packs: each helper has a sizeof...==0
	// terminal and a sizeof...!=0 step that recurses on the tail of the pack.

	// True if the function at (depth,off) is supported away from the domain boundary
	template< unsigned int Degree , unsigned int ... Degrees >
	static typename std::enable_if< sizeof ... ( Degrees )==0 , bool >::type _IsInteriorlySupported( UIntPack< Degree , Degrees ... > , unsigned int depth , const int off[] )
	{
		int begin , end;
		BSplineSupportSizes< Degree >::InteriorSupportedSpan( depth , begin , end );
		return off[0]>=begin && off[0]<end;
	}
	template< unsigned int Degree , unsigned int ... Degrees >
	static typename std::enable_if< sizeof ... ( Degrees )!=0 , bool >::type _IsInteriorlySupported( UIntPack< Degree , Degrees ... > , unsigned int depth , const int off[] )
	{
		int begin , end;
		BSplineSupportSizes< Degree >::InteriorSupportedSpan( depth , begin , end );
		return ( off[0]>=begin && off[0]<end ) && _IsInteriorlySupported( UIntPack< Degrees ... >() , depth , off+1 );
	}
	// Variant testing containment of the support within a user-supplied [begin,end] box
	template< unsigned int Degree , unsigned int ... Degrees >
	static typename std::enable_if< sizeof ... ( Degrees )==0 , bool >::type _IsInteriorlySupported( UIntPack< Degree , Degrees ... > , unsigned int depth , const int off[] , const double begin[] , const double end[] )
	{
		int res = 1<<depth;
		double b = ( 0. + off[0] + BSplineSupportSizes< Degree >::SupportStart ) / res;
		double e = ( 1. + off[0] + BSplineSupportSizes< Degree >::SupportEnd ) / res;
		return b>=begin[0] && e<=end[0];
	}
	template< unsigned int Degree , unsigned int ... Degrees >
	static typename std::enable_if< sizeof ... ( Degrees )!=0 , bool >::type _IsInteriorlySupported( UIntPack< Degree , Degrees ... > , unsigned int depth , const int off[] , const double begin[] , const double end[] )
	{
		int res = 1<<depth;
		double b = ( 0. + off[0] + BSplineSupportSizes< Degree >::SupportStart ) / res;
		double e = ( 1. + off[0] + BSplineSupportSizes< Degree >::SupportEnd ) / res;
		return b>=begin[0] && e<=end[0] && _IsInteriorlySupported( UIntPack< Degrees ... >() , depth , off+1 , begin+1 , end+1 );
	}
	// Per-dimension index span over which two interior functions overlap
	// NOTE(review): uses BOUNDARY_NEUMANN signatures -- presumably the boundary type is
	// irrelevant for interior spans; confirm against BSplineIntegrationData.
	template< unsigned int Degree1 , unsigned int ... Degrees1 , unsigned int Degree2 , unsigned int ... Degrees2 >
	static typename std::enable_if< sizeof ... ( Degrees1 )==0 >::type _InteriorOverlappedSpan( UIntPack< Degree1 , Degrees1 ... > , UIntPack< Degree2 , Degrees2 ... > , int depth , int begin[] , int end[] )
	{
		BSplineIntegrationData< FEMDegreeAndBType< Degree1 , BOUNDARY_NEUMANN >::Signature , FEMDegreeAndBType< Degree2 , BOUNDARY_NEUMANN >::Signature >::InteriorOverlappedSpan( depth , begin[0] , end[0] );
	}
	template< unsigned int Degree1 , unsigned int ... Degrees1 , unsigned int Degree2 , unsigned int ... Degrees2 >
	static typename std::enable_if< sizeof ... ( Degrees1 )!=0 >::type _InteriorOverlappedSpan( UIntPack< Degree1 , Degrees1 ... > , UIntPack< Degree2 , Degrees2 ... > , int depth , int begin[] , int end[] )
	{
		BSplineIntegrationData< FEMDegreeAndBType< Degree1 , BOUNDARY_NEUMANN >::Signature , FEMDegreeAndBType< Degree2 , BOUNDARY_NEUMANN >::Signature >::InteriorOverlappedSpan( depth , begin[0] , end[0] );
		_InteriorOverlappedSpan( UIntPack< Degrees1 ... >() , UIntPack< Degrees2 ... >() , depth , begin+1 , end+1 );
	}
	// True if the function at (depth,off) only overlaps interior functions
	template< unsigned int Degree1 , unsigned int ... Degrees1 , unsigned int Degree2 , unsigned int ... Degrees2 >
	static typename std::enable_if< sizeof ... ( Degrees1 )==0 , bool >::type _IsInteriorlyOverlapped( UIntPack< Degree1 , Degrees1 ... > , UIntPack< Degree2 , Degrees2 ... > , unsigned int depth , const int off[] )
	{
		int begin , end;
		BSplineIntegrationData< FEMDegreeAndBType< Degree1 , BOUNDARY_NEUMANN >::Signature , FEMDegreeAndBType< Degree2 , BOUNDARY_NEUMANN >::Signature >::InteriorOverlappedSpan( depth , begin , end );
		return off[0]>= begin && off[0]<end;
	}
	template< unsigned int Degree1 , unsigned int ... Degrees1 , unsigned int Degree2 , unsigned int ... Degrees2 >
	static typename std::enable_if< sizeof ... ( Degrees1 )!=0 , bool >::type _IsInteriorlyOverlapped( UIntPack< Degree1 , Degrees1 ... > , UIntPack< Degree2 , Degrees2 ... > , unsigned int depth , const int off[] )
	{
		int begin , end;
		BSplineIntegrationData< FEMDegreeAndBType< Degree1 , BOUNDARY_NEUMANN >::Signature , FEMDegreeAndBType< Degree2 , BOUNDARY_NEUMANN >::Signature >::InteriorOverlappedSpan( depth , begin , end );
		return ( off[0]>= begin && off[0]<end ) && _IsInteriorlyOverlapped( UIntPack< Degrees1 ... >() , UIntPack< Degrees2 ... >() , depth , off+1 );
	}
	// Window bounds of the parent functions overlapping a child node; parity of the
	// child offset (off[0]&1) selects which of the two per-parity tables to use
	template< unsigned int Degree1 , unsigned int ... Degrees1 , unsigned int Degree2 , unsigned int ... Degrees2 >
	static typename std::enable_if< sizeof ... ( Degrees1 )==0 >::type _ParentOverlapBounds( UIntPack< Degree1 , Degrees1 ... > , UIntPack< Degree2 , Degrees2 ... > , unsigned int depth , const int off[] , int start[] , int end[] )
	{
		const int OverlapStart = BSplineOverlapSizes< Degree1 , Degree2 >::OverlapStart;
		start[0] = BSplineOverlapSizes< Degree1 , Degree2 >::ParentOverlapStart[ off[0] & 1 ] - OverlapStart;
		end  [0] = BSplineOverlapSizes< Degree1 , Degree2 >::ParentOverlapEnd  [ off[0] & 1 ] - OverlapStart + 1;
	}
	template< unsigned int Degree1 , unsigned int ... Degrees1 , unsigned int Degree2 , unsigned int ... Degrees2 >
	static typename std::enable_if< sizeof ... ( Degrees1 )!=0 >::type _ParentOverlapBounds( UIntPack< Degree1 , Degrees1 ... > , UIntPack< Degree2 , Degrees2 ... > , unsigned int depth , const int off[] , int start[] , int end[] )
	{
		const int OverlapStart = BSplineOverlapSizes< Degree1 , Degree2 >::OverlapStart;
		start[0] = BSplineOverlapSizes< Degree1 , Degree2 >::ParentOverlapStart[ off[0] & 1 ] - OverlapStart;
		end  [0] = BSplineOverlapSizes< Degree1 , Degree2 >::ParentOverlapEnd  [ off[0] & 1 ] - OverlapStart + 1;
		_ParentOverlapBounds( UIntPack< Degrees1 ... >() , UIntPack< Degrees2 ... >() , depth , off+1 , start+1 , end+1 );
	}
	// Same, but the per-dimension parities are packed into the bits of "corner"
	template< unsigned int Degree1 , unsigned int ... Degrees1 , unsigned int Degree2 , unsigned int ... Degrees2 >
	static typename std::enable_if< sizeof ... ( Degrees1 )==0 >::type _ParentOverlapBounds( UIntPack< Degree1 , Degrees1 ... > , UIntPack< Degree2 , Degrees2 ... > , int corner , int start[] , int end[] )
	{
		const int OverlapStart = BSplineOverlapSizes< Degree1 , Degree2 >::OverlapStart;
		start[0] = BSplineOverlapSizes< Degree1 , Degree2 >::ParentOverlapStart[ corner & 1 ] - OverlapStart;
		end  [0] = BSplineOverlapSizes< Degree1 , Degree2 >::ParentOverlapEnd  [ corner & 1 ] - OverlapStart + 1;
	}
	template< unsigned int Degree1 , unsigned int ... Degrees1 , unsigned int Degree2 , unsigned int ... Degrees2 >
	static typename std::enable_if< sizeof ... ( Degrees1 )!=0 >::type _ParentOverlapBounds( UIntPack< Degree1 , Degrees1 ... > , UIntPack< Degree2 , Degrees2 ... > , int corner , int start[] , int end[] )
	{
		const int OverlapStart = BSplineOverlapSizes< Degree1 , Degree2 >::OverlapStart;
		start[0] = BSplineOverlapSizes< Degree1 , Degree2 >::ParentOverlapStart[ corner & 1 ] - OverlapStart;
		end  [0] = BSplineOverlapSizes< Degree1 , Degree2 >::ParentOverlapEnd  [ corner & 1 ] - OverlapStart + 1;
		_ParentOverlapBounds( UIntPack< Degrees1 ... >() , UIntPack< Degrees2 ... >() , corner>>1 , start+1 , end+1 );
	}
public:
	// Public entry points: dispatch to the per-dimension recursions above
	template< unsigned int ... Degrees > static bool IsInteriorlySupported( UIntPack< Degrees ... > , int depth , const int offset[] ){ return depth>=0 && _IsInteriorlySupported( UIntPack< Degrees ... >() , depth , offset ); }
	template< unsigned int ... Degrees > static bool IsInteriorlySupported( UIntPack< Degrees ... > , int depth , const int offset[] , const double begin[] , const double end[] ){ return depth>=0 && _IsInteriorlySupported( UIntPack< Degrees ... >() , depth , offset , begin , end ); }
	template< unsigned int ... Degrees1 , unsigned int ... Degrees2 >
	static void InteriorOverlappedSpan( UIntPack< Degrees1 ... > , UIntPack< Degrees2 ... > , int depth , int begin[] , int end[] )
	{
		static_assert( sizeof ... ( Degrees1 ) == sizeof ... ( Degrees2 ) , "[ERROR] Dimensions don't match" );
		_InteriorOverlappedSpan( UIntPack< Degrees1 ... >() , UIntPack< Degrees2 ... >() , depth , begin , end );
	}
	template< unsigned int ... Degrees1 , unsigned int ... Degrees2 >
	static bool IsInteriorlyOverlapped( UIntPack< Degrees1 ... > , UIntPack< Degrees2 ... > , int depth , const int offset[] )
	{
		static_assert( sizeof ... ( Degrees1 ) == sizeof ... ( Degrees2 ) , "[ERROR] Dimensions don't match" );
		return depth>=0 && _IsInteriorlyOverlapped( UIntPack< Degrees1 ... >() , UIntPack< Degrees2 ... >() , depth , offset );
	}
	template< unsigned int ... Degrees1 , unsigned int ... Degrees2 >
	static void ParentOverlapBounds( UIntPack< Degrees1 ... > , UIntPack< Degrees2 ... > , int depth , const int offset[] , int start[] , int end[] )
	{
		static_assert( sizeof ... ( Degrees1 ) == sizeof ... ( Degrees2 ) , "[ERROR] Dimensions don't match" );
		// Note: start/end are left untouched at depth 0 (the root has no parent)
		if( depth>0 ) _ParentOverlapBounds( UIntPack< Degrees1 ... >() , UIntPack< Degrees2 ... >() , depth , offset , start , end );
	}
	template< unsigned int ... Degrees1 , unsigned int ... Degrees2 >
	static void ParentOverlapBounds( UIntPack< Degrees1 ... > , UIntPack< Degrees2 ... > , int corner , int start[] , int end[] )
	{
		static_assert( sizeof ... ( Degrees1 ) == sizeof ... ( Degrees2 ) , "[ERROR] Dimensions don't match" );
		_ParentOverlapBounds( UIntPack< Degrees1 ... >() , UIntPack< Degrees2 ... >() , corner , start , end );
	}
	// Abstract evaluator of (derivatives of) basis functions at a point
	template< unsigned int Dim >
	struct PointEvaluatorState
	{
		virtual double value( const int offset[] , const unsigned int d[] ) const = 0;
		virtual double subValue( const int offset[] , const unsigned int d[] ) const = 0;
		// Evaluates all derivatives enumerated by DerivativeType at the function with the given offset
		template< class Real , typename DerivativeType >
		Point< Real , DerivativeType::Size > dValues( const int offset[] ) const
		{
			Point< Real , DerivativeType::Size > v;
			unsigned int _d[Dim];
			for( int d=0 ; d<DerivativeType::Size ; d++ )
			{
				DerivativeType::Factor( d , _d );
				v[d] = (Real)value( offset , _d );
			}
			return v;
		}
		// Dots the sub-dimensional values against v, accumulating by the last-dimension derivative order
		template< class Real , typename DerivativeType >
		Point< Real , DerivativeType::LastDerivative+1 > partialDotDValues( Point< Real , DerivativeType::Size > v , const int offset[] ) const
		{
			Point< Real , DerivativeType::LastDerivative+1 > dot;
			unsigned int _d[Dim];
			for( int d=0 ; d<DerivativeType::Size ; d++ )
			{
				DerivativeType::Factor( d , _d );
				dot[ _d[Dim-1] ] += (Real)( subValue( offset , _d ) * v[d] );
			}
			return dot;
		}
	};
	template< unsigned int ... TDegrees > struct PointEvaluator< UIntPack< TDegrees ... > >
	{
		static const unsigned int Dim = sizeof ... ( TDegrees );
	};
	// Abstract restriction/prolongation (down-/up-sampling) operator between consecutive depths
	template< unsigned int ... TDegrees > struct RestrictionProlongation< UIntPack< TDegrees ... > >
	{
		virtual void init( void ){ }
		virtual double upSampleCoefficient( const int pOff[] , const int cOff[] ) const = 0;
		typedef DynamicWindow< double , UIntPack< ( - BSplineSupportSizes< TDegrees >::DownSample0Start + BSplineSupportSizes< TDegrees >::DownSample1End + 1 ) ... > > DownSampleStencil;
		struct UpSampleStencil : public DynamicWindow< double , UIntPack< BSplineSupportSizes< TDegrees >::UpSampleSize ... > > { };
		struct DownSampleStencils : public DynamicWindow< DownSampleStencil , IsotropicUIntPack< sizeof ... ( TDegrees ) , 2 > > { };
		void init( int highDepth ){ _highDepth = highDepth ; init(); }
		void setStencil ( UpSampleStencil  & stencil  ) const;
		void setStencils( DownSampleStencils& stencils ) const;
		int highDepth( void ) const { return _highDepth; }
	protected:
		int _highDepth;	// the finer of the two depths the operator maps between
	};
	// Abstract symmetric system integrator (test == trial space)
	template< unsigned int ... TDegrees > struct System< UIntPack< TDegrees ... > >
	{
		virtual void init( void ){ }
		virtual double ccIntegrate( const int off1[] , const int off2[] ) const = 0;	// child/child integral
		virtual double pcIntegrate( const int off1[] , const int off2[] ) const = 0;	// parent/child integral
		virtual bool vanishesOnConstants( void ) const { return false; }
		virtual RestrictionProlongation< UIntPack< TDegrees ... > >& restrictionProlongation( void ) = 0;
		struct CCStencil : public DynamicWindow< double , UIntPack< BSplineOverlapSizes< TDegrees , TDegrees >::OverlapSize ... > >{ };
#ifdef SHOW_WARNINGS
#pragma message ( "[WARNING] Why are the parent/child stencils so big?" )
#endif // SHOW_WARNINGS
		struct PCStencils : public DynamicWindow< CCStencil , IsotropicUIntPack< sizeof ... ( TDegrees ) , 2 > >{ };
		void init( int highDepth ){ _highDepth = highDepth ; init(); }
		template< bool IterateFirst > void setStencil ( CCStencil & stencil  ) const;
		template< bool IterateFirst > void setStencils( PCStencils& stencils ) const;
		int highDepth( void ) const { return _highDepth; }
	protected:
		int _highDepth;
	};
	// Abstract constraint integrator between a test space and a CDim-dimensional constraint space
	template< unsigned int ... TDegrees , unsigned int ... CDegrees , unsigned int CDim > struct Constraint< UIntPack< TDegrees ... > , UIntPack< CDegrees ... > , CDim >
	{
		static_assert( sizeof...(TDegrees)==sizeof...(CDegrees) , "[ERROR] BaseFEMIntegrator::Constraint: Test and constraint dimensions don't match" );
		virtual void init( void ){ ; }
		virtual Point< double , CDim > ccIntegrate( const int off1[] , const int off2[] ) const = 0;
		virtual Point< double , CDim > pcIntegrate( const int off1[] , const int off2[] ) const = 0;
		virtual Point< double , CDim > cpIntegrate( const int off1[] , const int off2[] ) const = 0;
		virtual RestrictionProlongation< UIntPack< TDegrees ... > >& tRestrictionProlongation( void ) = 0;
		virtual RestrictionProlongation< UIntPack< CDegrees ... > >& cRestrictionProlongation( void ) = 0;
		struct CCStencil : public DynamicWindow< Point< double , CDim > , UIntPack< BSplineOverlapSizes< TDegrees , CDegrees >::OverlapSize ... > >{ };
#ifdef SHOW_WARNINGS
#pragma message ( "[WARNING] Why are the parent/child stencils so big?" )
#endif // SHOW_WARNINGS
		struct PCStencils : public DynamicWindow< CCStencil , IsotropicUIntPack< sizeof ... ( TDegrees ) , 2 > >{ };
		struct CPStencils : public DynamicWindow< CCStencil , IsotropicUIntPack< sizeof ... ( TDegrees ) , 2 > >{ };
		void init( int highDepth ){ _highDepth = highDepth ; init(); }
		template< bool IterateFirst > void setStencil ( CCStencil & stencil  ) const;
		template< bool IterateFirst > void setStencils( PCStencils& stencils ) const;
		template< bool IterateFirst > void setStencils( CPStencils& stencils ) const;
		int highDepth( void ) const { return _highDepth; }
	protected:
		int _highDepth;
	};
	// Adapts a System into a 1-dimensional Constraint (cp is the transpose of pc)
	template< unsigned int ... TDegrees > struct SystemConstraint< UIntPack< TDegrees ... > > : public Constraint< UIntPack< TDegrees ... > , UIntPack< TDegrees ... > , 1 >
	{
		typedef Constraint< UIntPack< TDegrees ... > , UIntPack< TDegrees ... > , 1 > Base;
		SystemConstraint( System< UIntPack< TDegrees ... > >& sys ) : _sys( sys ){;}
		void init( void ){ _sys.init( Base::highDepth() ) ; _sys.init(); }
		Point< double , 1 > ccIntegrate( const int off1[] , const int off2[] ) const{ return Point< double , 1 >( _sys.ccIntegrate( off1 , off2 ) ); }
		Point< double , 1 > pcIntegrate( const int off1[] , const int off2[] ) const{ return Point< double , 1 >( _sys.pcIntegrate( off1 , off2 ) ); }
		Point< double , 1 > cpIntegrate( const int off1[] , const int off2[] ) const{ return Point< double , 1 >( _sys.pcIntegrate( off2 , off1 ) ); }
		RestrictionProlongation< UIntPack< TDegrees ... > >& tRestrictionProlongation( void ){ return _sys.restrictionProlongation(); }
		RestrictionProlongation< UIntPack< TDegrees ... > >& cRestrictionProlongation( void ){ return _sys.restrictionProlongation(); }
	protected:
		System< UIntPack< TDegrees ... > >& _sys;
	};
};

/////////////////////////////////////////////////
// An implementation of the virtual integrator //
/////////////////////////////////////////////////
struct FEMIntegrator
{
protected:
	// True if the FEM function at (depth,offset) is in-bounds for every dimension's signature
	template< unsigned int FEMSig , unsigned int ... FEMSigs >
	static typename std::enable_if< sizeof ... ( FEMSigs )==0 , bool >::type _IsValidFEMNode( UIntPack< FEMSig , FEMSigs ... > , unsigned int depth , const int offset[] )
	{
		return !BSplineEvaluationData< FEMSig >::OutOfBounds( depth , offset[0] );
	}
	template< unsigned int FEMSig , unsigned int ... FEMSigs >
	static typename std::enable_if< sizeof ... ( FEMSigs )!=0 , bool >::type _IsValidFEMNode( UIntPack< FEMSig , FEMSigs ... > , unsigned int depth , const int offset[] )
	{
		return !BSplineEvaluationData< FEMSig >::OutOfBounds( depth , offset[0] ) && _IsValidFEMNode( UIntPack< FEMSigs ... >() , depth , offset+1 );
	}
	template< unsigned int FEMSig , unsigned ... FEMSigs > static typename std::enable_if< sizeof ... ( FEMSigs )==0 , bool >::type _IsOutOfBounds( UIntPack< FEMSig , FEMSigs ... 
// (continuation of _IsOutOfBounds, whose declaration begins in the previous chunk)
> , unsigned int depth , const int offset[] ) { return BSplineEvaluationData< FEMSig >::OutOfBounds( depth , offset[0] ); } template< unsigned int FEMSig , unsigned ... FEMSigs > static typename std::enable_if< sizeof ... ( FEMSigs )!=0 , bool >::type _IsOutOfBounds( UIntPack< FEMSig , FEMSigs ... > , unsigned int depth , const int offset[] ) { return BSplineEvaluationData< FEMSig >::OutOfBounds( depth , offset[0] ) || _IsOutOfBounds( UIntPack< FEMSigs ... >() , depth , offset+1 ); } template< unsigned int FEMSig , unsigned int ... FEMSigs > static typename std::enable_if< sizeof ... ( FEMSigs )==0 >::type _BSplineBegin( UIntPack< FEMSig , FEMSigs ... > , unsigned int depth , int begin[] ) { begin[0] = BSplineEvaluationData< FEMSig >::Begin( depth ); } template< unsigned int FEMSig , unsigned int ... FEMSigs > static typename std::enable_if< sizeof ... ( FEMSigs )!=0 >::type _BSplineBegin( UIntPack< FEMSig , FEMSigs ... > , unsigned int depth , int begin[] ) { begin[0] = BSplineEvaluationData< FEMSig >::Begin( depth ) ; _BSplineBegin( UIntPack< FEMSigs ... >() , depth , begin+1 ); } template< unsigned int FEMSig , unsigned int ... FEMSigs > static typename std::enable_if< sizeof ... ( FEMSigs )==0 >::type _BSplineEnd( UIntPack< FEMSig , FEMSigs ... > , unsigned int depth , int end[] ) { end[0] = BSplineEvaluationData< FEMSig >::End( depth ); } template< unsigned int FEMSig , unsigned int ... FEMSigs > static typename std::enable_if< sizeof ... ( FEMSigs )!=0 >::type _BSplineEnd( UIntPack< FEMSig , FEMSigs ... > , unsigned int depth , int end[] ) { end[0] = BSplineEvaluationData< FEMSig >::End( depth ) ; _BSplineEnd( UIntPack< FEMSigs ... >() , depth , end+1 ); } template< unsigned int FEMSig , unsigned int ... FEMSigs > static typename std::enable_if< sizeof ... ( FEMSigs )==0 , double >::type _Integral( UIntPack< FEMSig , FEMSigs ... > , unsigned int depth , const int offset[] , const double begin[] , const double end[] ) { return BSplineEvaluationData< FEMSig >::Integral( depth , offset[0] , begin[0] , end[0] , 0 ); } template< unsigned int FEMSig , unsigned int ... FEMSigs > static typename std::enable_if< sizeof ... ( FEMSigs )!=0 , double >::type _Integral( UIntPack< FEMSig , FEMSigs ... > , unsigned int depth , const int offset[] , const double begin[] , const double end[] ) { return BSplineEvaluationData< FEMSig >::Integral( depth , offset[0] , begin[0] , end[0] , 0 ) * _Integral( UIntPack< FEMSigs ... >() , depth , offset+1 , begin+1 , end+1 ); } public:
// Public entry points: dispatch into the recursive helpers, guarding against negative depths.
template< unsigned int ... FEMSigs > static double Integral( UIntPack< FEMSigs ... > , int depth , const int offset[] , const double begin[] , const double end[] ) { if( depth<0 ) return 0; else return _Integral( UIntPack< FEMSigs ... >() , depth , offset , begin , end ); } template< unsigned int ... FEMSigs > static bool IsValidFEMNode( UIntPack< FEMSigs ... > , int depth , const int offset[] ){ return _IsValidFEMNode( UIntPack< FEMSigs ... >() , depth , offset ); } template< unsigned int ... FEMSigs > static bool IsOutOfBounds( UIntPack< FEMSigs ... > , int depth , const int offset[] ){ return depth<0 || _IsOutOfBounds( UIntPack< FEMSigs ... >() , depth , offset ); } template< unsigned int ... FEMSigs > static void BSplineBegin( UIntPack< FEMSigs ... > , int depth , int begin[] ){ if( depth>=0 ) _BSplineBegin( UIntPack< FEMSigs ... >() , depth , begin ); } template< unsigned int ... FEMSigs > static void BSplineEnd ( UIntPack< FEMSigs ... > , int depth , int end [] ){ if( depth>=0 ) _BSplineEnd ( UIntPack< FEMSigs ... >() , depth , end ); }
// Primary templates; specialized below for UIntPack arguments.
template< typename TSignatures , typename TDerivatives > struct System{}; template< typename TSignatures , typename TDerivatives , typename CSignatures , typename CDerivatives , unsigned int CDim > struct Constraint{}; template< typename TSignatures , typename TDerivatives , typename CSignatures , typename CDerivatives > struct ScalarConstraint{}; template< typename TSignatures > struct RestrictionProlongation{}; template< typename TSignatures , typename TDerivatives > struct PointEvaluator{}; template< typename TSignatures , typename TDerivatives > struct PointEvaluatorState{};
// Concrete evaluator state: caches, per axis, the values (and derivatives up to TDs) of the
// B-splines supported on the evaluation point; _value() multiplies the per-axis entries
// (over all Dim axes for value(), over the first Dim-1 axes for subValue()).
template< unsigned int ... TSignatures , unsigned int ... TDs > struct PointEvaluatorState< UIntPack< TSignatures ... > , UIntPack< TDs ... > > : public BaseFEMIntegrator::template PointEvaluatorState< sizeof ... ( TSignatures ) > { static_assert( sizeof...(TSignatures)==sizeof...(TDs) , "[ERROR] Degree and derivative dimensions don't match" ); static_assert( UIntPack< FEMSignature< TSignatures >::Degree ... >::template Compare< UIntPack< TDs ... > >::GreaterThanOrEqual , "[ERROR] PointEvaluatorState: More derivatives than degrees" ); static const unsigned int Dim = sizeof...(TSignatures); double value ( const int offset[] , const unsigned int derivatives[] ) const { return _value< Dim >( offset , derivatives ); } double subValue( const int offset[] , const unsigned int derivatives[] ) const { return _value< Dim-1 >( offset , derivatives ); }
// Bypassing the "auto" keyword
template< unsigned int _Dim > const double (*(values)( void ) const )[ UIntPack< TDs ... >::template Get< _Dim >()+1 ] { return std::template get< _Dim >( _oneDValues ).values; } protected: int _pointOffset[Dim]; template< unsigned int Degree , unsigned int D > struct _OneDValues { double values[ BSplineSupportSizes< Degree >::SupportSize ][ D+1 ]; double value( int dOff , unsigned int d ) const { if( dOff>=-BSplineSupportSizes< Degree >::SupportEnd && dOff<=-BSplineSupportSizes< Degree >::SupportStart && d<=D ) return values[ dOff+BSplineSupportSizes< Degree >::SupportEnd][d]; else return 0; } }; std::tuple< _OneDValues< FEMSignature< TSignatures >::Degree , TDs > ... > _oneDValues; template< unsigned int MaxDim=Dim , unsigned int I=0 > typename std::enable_if< I==MaxDim , double >::type _value( const int off[] , const unsigned int d[] ) const { return 1.; } template< unsigned int MaxDim=Dim , unsigned int I=0 > typename std::enable_if< I!=MaxDim , double >::type _value( const int off[] , const unsigned int d[] ) const { return std::get< I >( _oneDValues ).value( off[I]-_pointOffset[I] , d[I] ) * _value< MaxDim , I+1 >( off , d ); } template< typename T1 , typename T2 > friend struct PointEvaluator; };
// Concrete point evaluator: owns per-axis BSplineData (reset to _maxDepth at construction)
// and fills a PointEvaluatorState for a query point at a given depth.
template< unsigned int ... TSignatures , unsigned int ... TDs > struct PointEvaluator< UIntPack< TSignatures ... > , UIntPack< TDs ... > > : public BaseFEMIntegrator::template PointEvaluator< UIntPack< FEMSignature< TSignatures >::Degree ... > > { static_assert( sizeof...(TSignatures)==sizeof...(TDs) , "[ERROR] PointEvaluator: Degree and derivative dimensions don't match" ); static_assert( UIntPack< FEMSignature< TSignatures >::Degree ... >::template Compare< UIntPack< TDs ... > >::GreaterThanOrEqual , "[ERROR] PointEvaluator: More derivatives than degrees" ); static const unsigned int Dim = sizeof ... ( TSignatures ); typedef typename BaseFEMIntegrator::template PointEvaluator< UIntPack< FEMSignature< TSignatures >::Degree ... > > Base; PointEvaluator( unsigned int maxDepth ) : _maxDepth( maxDepth ) { _init(); } template< unsigned int ... EDs > void initEvaluationState( Point< double , Dim > p , unsigned int depth , PointEvaluatorState< UIntPack< TSignatures ... > , UIntPack< EDs ... > >& state ) const { unsigned int res = 1<<depth; for( int d=0 ; d<Dim ; d++ ) state._pointOffset[d] = (int)( p[d] * res ); initEvaluationState( p , depth , state._pointOffset , state ); } template< unsigned int ... EDs > void initEvaluationState( Point< double , Dim > p , unsigned int depth , const int* offset , PointEvaluatorState< UIntPack< TSignatures ... > , UIntPack< EDs ... > >& state ) const { static_assert( UIntPack< TDs ... >::template Compare< UIntPack< EDs ... > >::GreaterThanOrEqual , "[ERROR] PointEvaluator::init: More evaluation derivatives than stored derivatives" ); for( int d=0 ; d<Dim ; d++ ) state._pointOffset[d] = (int)offset[d]; _initEvaluationState( UIntPack< TSignatures ... >() , UIntPack< EDs ... >() , &p[0] , depth , state ); } protected: unsigned int _maxDepth; std::tuple< BSplineData< TSignatures , TDs > ... > _bSplineData; template< unsigned int I=0 > typename std::enable_if< I==Dim >::type _init( void ){} template< unsigned int I=0 > typename std::enable_if< I< Dim >::type _init( void ){ std::get< I >( _bSplineData ).reset( _maxDepth ) ; _init< I+1 >( ); } template< unsigned int I , unsigned int TSig , unsigned int D , typename State > void _setEvaluationState( const double* p , unsigned int depth , State& state ) const { static const int LeftSupportRadius = -BSplineSupportSizes< FEMSignature< TSig >::Degree >::SupportStart; static const int LeftPointSupportRadius = BSplineSupportSizes< FEMSignature< TSig >::Degree >::SupportEnd ; static const int RightSupportRadius = BSplineSupportSizes< FEMSignature< TSig >::Degree >::SupportEnd ; static const int RightPointSupportRadius = -BSplineSupportSizes< FEMSignature< TSig >::Degree >::SupportStart; for( int s=-LeftPointSupportRadius ; s<=RightPointSupportRadius ; s++ ) { int pIdx = state._pointOffset[I]; int fIdx = state._pointOffset[I]+s; double _p = p[I]; const Polynomial< FEMSignature< TSig >::Degree >* components = std::get< I >( _bSplineData )[depth].polynomialsAndOffset( _p , pIdx , fIdx ); for( int d=0 ; d<=D ; d++ ) std::get< I >( state._oneDValues ).values[ s+LeftPointSupportRadius ][d] = components[d]( _p ); } } template< typename State , unsigned int TSig , unsigned int ... TSigs , unsigned int D , unsigned int ... Ds > typename std::enable_if< sizeof...(TSigs)==0 >::type _initEvaluationState( UIntPack< TSig , TSigs ... > , UIntPack< D , Ds ... > , const double* p , unsigned int depth , State& state ) const { _setEvaluationState< Dim-1 , TSig , D >( p , depth , state ); } template< typename State , unsigned int TSig , unsigned int ... TSigs , unsigned int D , unsigned int ... Ds > typename std::enable_if< sizeof...(TSigs)!=0 >::type _initEvaluationState( UIntPack< TSig , TSigs ... > , UIntPack< D , Ds ... > , const double* p , unsigned int depth , State& state ) const { _setEvaluationState< Dim-1-sizeof...(TSigs) , TSig , D >( p , depth , state ); _initEvaluationState( UIntPack< TSigs ... >() , UIntPack< Ds ... >() , p , depth , state ); } };
// Concrete restriction/prolongation: the up-sample coefficient is the product of the
// per-axis one-dimensional up-sample weights.
template< unsigned int ... TSignatures > struct RestrictionProlongation< UIntPack< TSignatures ... > > : public BaseFEMIntegrator::template RestrictionProlongation< UIntPack< FEMSignature< TSignatures >::Degree ... > > { static const unsigned int Dim = sizeof ... ( TSignatures ); typedef typename BaseFEMIntegrator::template RestrictionProlongation< UIntPack< FEMSignature< TSignatures >::Degree ... > > Base; double upSampleCoefficient( const int pOff[] , const int cOff[] ) const { return _coefficient( pOff , cOff ); } void init( unsigned int depth ){ Base::init( depth ); } void init( void ){ _init( Base::highDepth() ); } protected: std::tuple< typename BSplineEvaluationData< TSignatures >::UpSampleEvaluator ... > _upSamplers; template< unsigned int D=0 > typename std::enable_if< D==Dim >::type _init( int highDepth ){ } template< unsigned int D=0 > typename std::enable_if< D< Dim >::type _init( int highDepth ){ std::get< D >( _upSamplers ).set( highDepth-1 ) ; _init< D+1 >( highDepth ); } template< unsigned int D=0 > typename std::enable_if< D==Dim , double >::type _coefficient( const int pOff[] , const int cOff[] ) const { return 1.; } template< unsigned int D=0 > typename std::enable_if< D< Dim , double >::type _coefficient( const int pOff[] , const int cOff[] ) const { return _coefficient< D+1 >( pOff , cOff ) * std::get< D >( _upSamplers ).value( pOff[D] , cOff[D] ); } };
// Concrete constraint integrator: integrals are separable products of per-axis 1D integrals
// (ccIntegrator / pcIntegrator / cpIntegrator), combined across derivative pairs by the
// user-populated weights matrices (one Matrix per output coordinate).
// NOTE(review): init() keeps only entries with weights[c](d1,d2)>0 -- strictly negative
// weights are silently dropped from _weightedIndices; confirm negative weights are never intended.
template< unsigned int ... TSignatures , unsigned int ... TDerivatives , unsigned int ... CSignatures , unsigned int ... CDerivatives , unsigned int CDim > struct Constraint< UIntPack< TSignatures ... > , UIntPack< TDerivatives ... > , UIntPack< CSignatures ... > , UIntPack< CDerivatives ... > , CDim > : public BaseFEMIntegrator::template Constraint< UIntPack< FEMSignature< TSignatures >::Degree ... > , UIntPack< FEMSignature< CSignatures >::Degree ... > , CDim > { static_assert( sizeof ... ( TSignatures ) == sizeof ... ( CSignatures ) , "[ERROR] Test signatures and contraint signatures must have the same dimension" ); static_assert( sizeof ... ( TSignatures ) == sizeof ... ( TDerivatives ) , "[ERROR] Test signatures and derivatives must have the same dimension" ); static_assert( sizeof ... ( CSignatures ) == sizeof ... ( CDerivatives ) , "[ERROR] Constraint signatures and derivatives must have the same dimension" ); static_assert( UIntPack< FEMSignature< TSignatures >::Degree ... >::template Compare< UIntPack< TDerivatives ... > >::GreaterThanOrEqual , "[ERROR] Test functions cannot have more derivatives than the degree" ); static_assert( UIntPack< FEMSignature< CSignatures >::Degree ... >::template Compare< UIntPack< CDerivatives ... > >::GreaterThanOrEqual , "[ERROR] Test functions cannot have more derivatives than the degree" ); static const unsigned int Dim = sizeof ... ( TSignatures ); typedef typename BaseFEMIntegrator::template Constraint< UIntPack< FEMSignature< TSignatures >::Degree ... > , UIntPack< FEMSignature< CSignatures >::Degree ... > , CDim > Base; static const unsigned int TDerivativeSize = TensorDerivatives< UIntPack< TDerivatives ... > >::Size; static const unsigned int CDerivativeSize = TensorDerivatives< UIntPack< CDerivatives ... > >::Size; static inline void TFactorDerivatives( unsigned int idx , unsigned int d[ Dim ] ){ TensorDerivatives< UIntPack< TDerivatives ... > >::Factor( idx , d ); } static inline void CFactorDerivatives( unsigned int idx , unsigned int d[ Dim ] ){ TensorDerivatives< UIntPack< CDerivatives ... > >::Factor( idx , d ); } static inline unsigned int TDerivativeIndex( const unsigned int d[ Dim ] ){ return TensorDerivatives< UIntPack< TDerivatives ... > >::Index( d ); } static inline unsigned int CDerivativeIndex( const unsigned int d[ Dim ] ){ return TensorDerivatives< UIntPack< CDerivatives ... > >::Index( d ); } Matrix< double , TDerivativeSize , CDerivativeSize > weights[CDim]; Point< double , CDim > ccIntegrate( const int off1[] , const int off2[] ) const { return _integrate( INTEGRATE_CHILD_CHILD , off1 , off2 ); } Point< double , CDim > pcIntegrate( const int off1[] , const int off2[] ) const { return _integrate( INTEGRATE_PARENT_CHILD , off1 , off2 ); } Point< double , CDim > cpIntegrate( const int off1[] , const int off2[] ) const { return _integrate( INTEGRATE_CHILD_PARENT , off1 , off2 ); } void init( unsigned int depth ){ Base::init( depth ); } void init( void ) { _init( Base::highDepth() ); _weightedIndices.resize(0); for( unsigned int d1=0 ; d1<TDerivativeSize ; d1++ ) for( unsigned int d2=0 ; d2<CDerivativeSize ; d2++ ) { _WeightedIndices w(d1,d2); for( unsigned int c=0 ; c<CDim ; c++ ) if( weights[c](d1,d2)>0 ) w.indices.push_back( std::pair< unsigned int , double >( c , weights[c](d1,d2) ) ); if( w.indices.size() ) _weightedIndices.push_back(w); } } typename BaseFEMIntegrator::template RestrictionProlongation< UIntPack< FEMSignature< TSignatures >::Degree ... > >& tRestrictionProlongation( void ){ return _tRestrictionProlongation; } typename BaseFEMIntegrator::template RestrictionProlongation< UIntPack< FEMSignature< CSignatures >::Degree ... > >& cRestrictionProlongation( void ){ return _cRestrictionProlongation; } protected: RestrictionProlongation< UIntPack< TSignatures ... > > _tRestrictionProlongation; RestrictionProlongation< UIntPack< CSignatures ... > > _cRestrictionProlongation; struct _WeightedIndices { _WeightedIndices( unsigned int _d1=0 , unsigned int _d2=0 ) : d1(_d1) , d2(_d2) { ; } unsigned int d1 , d2; std::vector< std::pair< unsigned int , double > > indices; }; std::vector< _WeightedIndices > _weightedIndices; enum IntegrationType { INTEGRATE_CHILD_CHILD , INTEGRATE_PARENT_CHILD , INTEGRATE_CHILD_PARENT }; template< unsigned int _TSig , unsigned int _TDerivatives , unsigned int _CSig , unsigned int _CDerivatives > struct _Integrators { typename BSplineIntegrationData< _TSig , _CSig >::FunctionIntegrator::template Integrator< _TDerivatives , _CDerivatives > ccIntegrator; typename BSplineIntegrationData< _TSig , _CSig >::FunctionIntegrator::template ChildIntegrator< _TDerivatives , _CDerivatives > pcIntegrator; typename BSplineIntegrationData< _CSig , _TSig >::FunctionIntegrator::template ChildIntegrator< _CDerivatives , _TDerivatives > cpIntegrator; }; std::tuple< _Integrators< TSignatures , TDerivatives , CSignatures , CDerivatives > ... > _integrators; template< unsigned int D=0 > typename std::enable_if< D==Dim >::type _init( int depth ){ ; } template< unsigned int D=0 > typename std::enable_if< D< Dim >::type _init( int depth ) { std::get< D >( _integrators ).ccIntegrator.set( depth ); if( depth ) std::get< D >( _integrators ).pcIntegrator.set( depth-1 ) , std::get< D >( _integrators ).cpIntegrator.set( depth-1 ); _init< D+1 >( depth ); } template< unsigned int D=0 > typename std::enable_if< D==Dim , double >::type _integral( IntegrationType iType , const int off1[] , const int off2[] , const unsigned int d1[] , const unsigned int d2[] ) const { return 1.; } template< unsigned int D=0 > typename std::enable_if< D< Dim , double >::type _integral( IntegrationType iType , const int off1[] , const int off2[] , const unsigned int d1[] , const unsigned int d2[] ) const { double remainingIntegral = _integral< D+1 >( iType , off1 , off2 , d1 , d2 ); switch( iType ) { case INTEGRATE_CHILD_CHILD: return std::get< D >( _integrators ).ccIntegrator.dot( off1[D] , off2[D] , d1[D] , d2[D] ) * remainingIntegral; case INTEGRATE_PARENT_CHILD: return std::get< D >( _integrators ).pcIntegrator.dot( off1[D] , off2[D] , d1[D] , d2[D] ) * remainingIntegral; case INTEGRATE_CHILD_PARENT: return std::get< D >( _integrators ).cpIntegrator.dot( off2[D] , off1[D] , d2[D] , d1[D] ) * remainingIntegral; default: ERROR_OUT( "Undefined integration type" ); } return 0; } Point< double , CDim > _integrate( IntegrationType iType , const int off1[] , const int off[] ) const; };
// Scalar-valued (CDim=1) constraint built from an initializer list of per-order weights.
template< unsigned int ... TSignatures , unsigned int ... TDerivatives , unsigned int ... CSignatures , unsigned int ... CDerivatives > struct ScalarConstraint< UIntPack< TSignatures ... > , UIntPack< TDerivatives ... > , UIntPack< CSignatures ... > , UIntPack< CDerivatives ... > > : public Constraint< UIntPack< TSignatures ... > , UIntPack< TDerivatives ... > , UIntPack< CSignatures ... > , UIntPack< CDerivatives ... > , 1 > { static const unsigned int Dim = sizeof ... ( TSignatures ); typedef typename BaseFEMIntegrator::template Constraint< UIntPack< FEMSignature< TSignatures >::Degree ... > , UIntPack< FEMSignature< CSignatures >::Degree ... > , 1 > Base; typedef Constraint< UIntPack< TSignatures ... > , UIntPack< TDerivatives ... > , UIntPack< CSignatures ... > , UIntPack< CDerivatives ... > , 1 > FullConstraint; using FullConstraint::weights;
// [NOTE] We define the constructor using a recursive function call to take into account multiplicity (e.g. so that d^2/dxdy and d^2/dydx each contribute)
ScalarConstraint( const std::initializer_list< double >& w ) { std::function< void ( unsigned int[] , const double[] , unsigned int ) > SetDerivativeWeights = [&]( unsigned int derivatives[Dim] , const double w[] , unsigned int d ) { unsigned int idx1 = FullConstraint::TDerivativeIndex( derivatives ) , idx2 = FullConstraint::CDerivativeIndex( derivatives ); weights[0][idx1][idx2] += w[0]; if( d>0 ) for( int dd=0 ; dd<Dim ; dd++ ){ derivatives[dd]++ ; SetDerivativeWeights( derivatives , w+1 , d-1 ) ; derivatives[dd]--; } }; static const unsigned int DMax = std::min< unsigned int >( UIntPack< TDerivatives ... >::Min() , UIntPack< CDerivatives ... >::Min() ); unsigned int derivatives[Dim]; double _w[DMax+1]; memset( _w , 0 , sizeof(_w) ); { unsigned int dd=0; for( typename std::initializer_list< double >::const_iterator iter=w.begin() ; iter!=w.end() && dd<=DMax ; dd++ , iter++ ) _w[dd] = *iter; } for( int d=0 ; d<Dim ; d++ ) derivatives[d] = 0; if( w.size() ) SetDerivativeWeights( derivatives , _w , std::min< unsigned int >( DMax+1 , (unsigned int)w.size() )-1 ); } };
// Symmetric system realized as a scalar self-constraint (test space == constraint space).
template< unsigned int ... TSignatures , unsigned int ... TDerivatives > struct System< UIntPack< TSignatures ... > , UIntPack< TDerivatives ... > > : public BaseFEMIntegrator::template System< UIntPack< FEMSignature< TSignatures >::Degree... > > { static_assert( sizeof ... ( TSignatures ) == sizeof ... ( TDerivatives ) , "[ERROR] Test signatures and derivatives must have the same dimension" ); static const unsigned int Dim = sizeof ... ( TSignatures ); typedef typename BaseFEMIntegrator::template System< UIntPack< FEMSignature< TSignatures >::Degree... > > Base; System( const std::initializer_list< double >& w ) : _sc( w ){ ; } void init( unsigned int depth ){ Base::init( depth ); } void init( void ){ ( (BaseFEMIntegrator::template Constraint< UIntPack< FEMSignature< TSignatures >::Degree ... > , UIntPack< FEMSignature< TSignatures >::Degree ... > , 1 >&)_sc ).init( BaseFEMIntegrator::template System< UIntPack< FEMSignature< TSignatures >::Degree... > >::_highDepth ); } double ccIntegrate( const int off1[] , const int off2[] ) const { return _sc.ccIntegrate( off1 , off2 )[0]; } double pcIntegrate( const int off1[] , const int off2[] ) const { return _sc.pcIntegrate( off1 , off2 )[0]; } bool vanishesOnConstants( void ) const { return _sc.weights[0][0][0]==0; } typename BaseFEMIntegrator::template RestrictionProlongation< UIntPack< FEMSignature< TSignatures >::Degree ... > >& restrictionProlongation( void ){ return _sc.tRestrictionProlongation(); } protected: ScalarConstraint< UIntPack< TSignatures ... > , UIntPack< TDerivatives ... > , UIntPack< TSignatures ... > , UIntPack< TDerivatives ...
> > _sc; }; }; ////////////////////////////////////////// template< unsigned int Dim > inline void SetGhostFlag( RegularTreeNode< Dim , FEMTreeNodeData >* node , bool flag ){ if( node && node->parent ) node->parent->nodeData.setGhostFlag( flag ); } template< unsigned int Dim > inline bool GetGhostFlag( const RegularTreeNode< Dim , FEMTreeNodeData >* node ){ return node==NULL || node->parent==NULL || node->parent->nodeData.getGhostFlag( ); } template< unsigned int Dim > inline bool IsActiveNode( const RegularTreeNode< Dim , FEMTreeNodeData >* node ){ return !GetGhostFlag< Dim >( node ); } template< unsigned int Dim , class Real , class Vertex > struct IsoSurfaceExtractor; template< unsigned int Dim , class Data > struct NodeSample { RegularTreeNode< Dim , FEMTreeNodeData >* node; Data data; }; template< unsigned int Dim , class Real > struct NodeAndPointSample { RegularTreeNode< Dim , FEMTreeNodeData >* node; ProjectiveData< Point< Real , Dim > , Real > sample; // bool is_confidence_point; // modified by dojo NodeAndPointSample() { //is_confidence_point = true; } // modified by dojo }; template< unsigned int Dim , class Real > using NodeSimplices = NodeSample< Dim , std::vector< Simplex< Real , Dim , Dim-1 > > >; template< typename T > struct WindowLoopData{ }; template< unsigned int ... Sizes > struct WindowLoopData< UIntPack< Sizes ... > > { static const int Dim = sizeof ... ( Sizes ); unsigned int size[1<<Dim]; unsigned int indices[1<<Dim][ WindowSize< UIntPack< Sizes ... > >::Size ]; WindowLoopData( std::function< void ( int c , int* , int* ) > boundsFunction ) { int start[Dim] , end[Dim]; for( int c=0 ; c<(1<<Dim) ; c++ ) { size[c] = 0; boundsFunction( c , start , end ); unsigned int idx[Dim]; WindowLoop< Dim >::Run ( start , end , [&]( int d , int i ){ idx[d] = i; } , [&]( void ){ indices[c][ size[c]++ ] = GetWindowIndex( UIntPack< Sizes ... 
>() , idx ); } ); } } }; template< class Data > void AddAtomic( Data& a , const Data& b ) { #pragma omp critical a += b; } template< class Real , unsigned int Dim > void AddAtomic( Point< Real , Dim >& a , const Point< Real , Dim >& b ) { for( int d=0 ; d<Dim ; d++ ) AddAtomic( a[d] , b[d] ); } void AddAtomic( float& a , const float& b ) { #pragma omp atomic a += b; } void AddAtomic( double& a , const double& b ) { #pragma omp atomic a += b; } template< class Data > bool IsZero( const Data& data ){ return false; } template< class Real , unsigned int Dim > bool IsZero( const Point< Real , Dim >& d ) { bool zero = true; for( int i=0 ; i<Dim ; i++ ) zero &= (d[i]==0); return zero; } bool IsZero( const float& f ){ return f==0.f; } bool IsZero( const double& f ){ return f==0.; } template< unsigned int Dim , class Real > class FEMTree { public: typedef RegularTreeNode< Dim , FEMTreeNodeData > FEMTreeNode; Allocator< FEMTreeNode >* nodeAllocator; bool aux_vfield_mode; // modified by dojo protected: template< unsigned int _Dim , class _Real , class Vertex > friend struct IsoSurfaceExtractor; std::atomic< int > _nodeCount; void _nodeInitializer( FEMTreeNode& node ){ node.nodeData.nodeIndex = _nodeCount++; } struct _NodeInitializer { FEMTree& femTree; _NodeInitializer( FEMTree& f ) : femTree(f){;} void operator() ( FEMTreeNode& node ){ femTree._nodeInitializer( node ); } }; public: typedef int LocalDepth; typedef int LocalOffset[Dim]; int nodeCount( void ) const { return _nodeCount; } typedef NodeAndPointSample< Dim , Real > PointSample; typedef typename FEMTreeNode::template NeighborKey< IsotropicUIntPack< Dim , 1 > , IsotropicUIntPack< Dim , 1 > > OneRingNeighborKey; typedef typename FEMTreeNode::template ConstNeighborKey< IsotropicUIntPack< Dim , 1 > , IsotropicUIntPack< Dim , 1 > > ConstOneRingNeighborKey; typedef typename FEMTreeNode::template Neighbors< IsotropicUIntPack< Dim , 3 > > OneRingNeighbors; typedef typename FEMTreeNode::template ConstNeighbors< 
IsotropicUIntPack< Dim , 3 > > ConstOneRingNeighbors; template< typename FEMDegreePack > using BaseSystem = typename BaseFEMIntegrator::template System< FEMDegreePack >; template< typename FEMSigPack , typename DerivativePack > using PointEvaluator = typename FEMIntegrator::template PointEvaluator< FEMSigPack , DerivativePack >; template< typename FEMSigPack , typename DerivativePack > using PointEvaluatorState = typename FEMIntegrator::template PointEvaluatorState< FEMSigPack , DerivativePack >; template< typename FEMDegreePack > using CCStencil = typename BaseSystem< FEMDegreePack >::CCStencil; template< typename FEMDegreePack > using PCStencils = typename BaseSystem< FEMDegreePack >::PCStencils; template< unsigned int ... FEMSigs > bool isValidFEMNode( UIntPack< FEMSigs ... > , const FEMTreeNode* node ) const; bool isValidSpaceNode( const FEMTreeNode* node ) const; const FEMTreeNode* leaf( Point< Real , Dim > p ) const; FEMTreeNode* leaf( Point< Real , Dim > p , LocalDepth maxDepth=-1 ); // [NOTE] In the case that T != double, we require both operators() for computing the system dual template< typename T , unsigned int PointD > struct InterpolationInfo { virtual void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const = 0; virtual Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const = 0; virtual Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const = 0; virtual Point< double , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< double , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const = 0; virtual const DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIdx ) const = 0; virtual bool constrainsDCTerm( void ) const = 0; virtual ~InterpolationInfo( void ){} DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIndex ){ 
return const_cast< DualPointInfo< Dim , Real , T , PointD >& >( ( ( const InterpolationInfo* )this )->operator[]( pointIndex ) ); } }; template< unsigned int PointD > struct InterpolationInfo< double , PointD > { virtual void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const = 0; virtual Point< double , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const = 0; virtual Point< double , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< double , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const = 0; virtual const DualPointInfo< Dim , Real , double , PointD >& operator[]( size_t pointIdx ) const = 0; virtual bool constrainsDCTerm( void ) const = 0; virtual ~InterpolationInfo( void ){} DualPointInfo< Dim , Real , double , PointD >& operator[]( size_t pointIndex ){ return const_cast< DualPointInfo< Dim , Real , double , PointD >& >( ( ( const InterpolationInfo* )this )->operator[]( pointIndex ) ); } }; template< typename T , unsigned int PointD , typename ConstraintDual , typename SystemDual > struct ApproximatePointInterpolationInfo : public InterpolationInfo< T , PointD > { void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const { int idx = _iData.index( node ); if( idx<0 ) begin = end = 0; else begin = idx , end = idx+1; } bool constrainsDCTerm( void ) const { return _constrainsDCTerm; } const DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIdx ) const { return _iData[ (int)pointIdx ]; } Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( _iData[ (int)pointIdx ].position ); } Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].position , dValues ); } Point< double , CumulativeDerivatives< Dim , PointD 
	>::Size > operator() ( size_t pointIdx , const Point< double , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].position , dValues ); }
	ApproximatePointInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _constraintDual( constraintDual ) , _systemDual( systemDual ) , _constrainsDCTerm( constrainsDCTerm ) { }
protected:
	// Sparse per-node dual-point storage; populated by the FEMTree (friend) via the Initialize* factories.
	SparseNodeData< DualPointInfo< Dim , Real , T , PointD > , ZeroUIntPack< Dim > > _iData;
	bool _constrainsDCTerm;
	ConstraintDual _constraintDual;
	SystemDual _systemDual;
	friend class FEMTree< Dim , Real >;
};
// double specialization: drops the redundant double-valued system-dual overload.
template< unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct ApproximatePointInterpolationInfo< double , PointD , ConstraintDual , SystemDual > : public InterpolationInfo< double , PointD >
{
	typedef double T;
	void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const { int idx = _iData.index( node ); if( idx<0 ) begin = end = 0; else begin = idx , end = idx+1; }
	bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
	const DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIdx ) const { return _iData[ (int)pointIdx ]; }
	Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( _iData[ (int)pointIdx ].position ); }
	Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].position , dValues ); }
	ApproximatePointInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _constraintDual( constraintDual ) , _systemDual( systemDual ) , _constrainsDCTerm( constrainsDCTerm ) { }
protected:
	SparseNodeData< DualPointInfo< Dim , Real , T , PointD > , ZeroUIntPack< Dim > > _iData;
	bool _constrainsDCTerm;
	ConstraintDual
	_constraintDual;
	SystemDual _systemDual;
	friend class FEMTree< Dim , Real >;
};
// As ApproximatePointInterpolationInfo, but each dual point also carries auxiliary Data
// that is handed to the constraint/system duals alongside the position.
template< typename T , typename Data , unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct ApproximatePointAndDataInterpolationInfo : public InterpolationInfo< T , PointD >
{
	void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const { int idx = _iData.index( node ); if( idx<0 ) begin = end = 0; else begin = idx , end = idx+1; }
	bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
	const DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIdx ) const { return _iData[ (int)pointIdx ].pointInfo; }
	Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( _iData[ (int)pointIdx ].pointInfo.position , _iData[ (int)pointIdx ].data ); }
	Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].pointInfo.position , _iData[ (int)pointIdx ].data , dValues ); }
	Point< double , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< double , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].pointInfo.position , _iData[ (int)pointIdx ].data , dValues ); }
	ApproximatePointAndDataInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _constraintDual( constraintDual ) , _systemDual( systemDual ) , _constrainsDCTerm( constrainsDCTerm ) { }
protected:
	SparseNodeData< DualPointAndDataInfo< Dim , Real , Data , T , PointD > , ZeroUIntPack< Dim > > _iData;
	bool _constrainsDCTerm;
	ConstraintDual _constraintDual;
	SystemDual _systemDual;
	friend class FEMTree< Dim , Real >;
};
template< typename Data , unsigned int PointD , typename ConstraintDual , typename SystemDual > struct
// double specialization of the point+data variant (single system-dual overload).
ApproximatePointAndDataInterpolationInfo< double , Data , PointD , ConstraintDual , SystemDual > : public InterpolationInfo< double , PointD >
{
	typedef double T;
	void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const { int idx = _iData.index( node ); if( idx<0 ) begin = end = 0; else begin = idx , end = idx+1; }
	bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
	const DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIdx ) const { return _iData[ (int)pointIdx ].pointInfo; }
	Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( _iData[ (int)pointIdx ].pointInfo.position , _iData[ (int)pointIdx ].data ); }
	Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].pointInfo.position , _iData[ (int)pointIdx ].data , dValues ); }
	ApproximatePointAndDataInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _constraintDual( constraintDual ) , _systemDual( systemDual ) , _constrainsDCTerm( constrainsDCTerm ) { }
protected:
	SparseNodeData< DualPointAndDataInfo< Dim , Real , Data , T , PointD > , ZeroUIntPack< Dim > > _iData;
	bool _constrainsDCTerm;
	ConstraintDual _constraintDual;
	SystemDual _systemDual;
	friend class FEMTree< Dim , Real >;
};
// Child-point ("brood") variant: each node stores up to 2^Dim dual points (one per child).
// A global point index packs (node index << Dim) | child index; range() reflects that layout.
template< typename T , unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct ApproximateChildPointInterpolationInfo : public InterpolationInfo< T , PointD >
{
	void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const { int idx = _iData.index( node ); if( idx<0 ) begin = end = 0; else begin = (idx<<Dim) , end = (idx<<Dim) | _iData[idx].size(); }
	bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
	const DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIdx
	) const { return __iData(pointIdx); }
	Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( __iData(pointIdx).position ); }
	Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( __iData(pointIdx).position , dValues ); }
	Point< double , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< double , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( __iData(pointIdx).position , dValues ); }
	ApproximateChildPointInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _constraintDual( constraintDual ) , _systemDual( systemDual ) , _constrainsDCTerm( constrainsDCTerm ) { }
protected:
	// Low Dim bits of a packed point index select the child within the brood.
	static const unsigned int _Mask = (1<<Dim)-1;
	SparseNodeData< DualPointInfoBrood< Dim , Real , T , PointD > , ZeroUIntPack< Dim > > _iData;
	// Unpack a global point index into (node, child) and fetch the stored dual point.
	DualPointInfo< Dim , Real , T , PointD >& __iData( size_t pointIdx ){ return _iData[ (int)(pointIdx>>Dim) ][ pointIdx & _Mask ]; }
	const DualPointInfo< Dim , Real , T , PointD >& __iData( size_t pointIdx ) const { return _iData[ (int)(pointIdx>>Dim) ][ pointIdx & _Mask ]; }
	bool _constrainsDCTerm;
	ConstraintDual _constraintDual;
	SystemDual _systemDual;
	friend class FEMTree< Dim , Real >;
};
// double specialization of the child-point variant.
template< unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct ApproximateChildPointInterpolationInfo< double , PointD , ConstraintDual , SystemDual > : public InterpolationInfo< double , PointD >
{
	typedef double T;
	void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const { int idx = _iData.index( node ); if( idx<0 ) begin = end = 0; else begin = (idx<<Dim) , end = (idx<<Dim) | _iData[idx].size(); }
	bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
	const DualPointInfo< Dim , Real , T , PointD >&
	operator[]( size_t pointIdx ) const { return __iData(pointIdx); }
	Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( __iData(pointIdx).position ); }
	Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( __iData(pointIdx).position , dValues ); }
	ApproximateChildPointInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _constraintDual( constraintDual ) , _systemDual( systemDual ) , _constrainsDCTerm( constrainsDCTerm ) { }
protected:
	static const unsigned int _Mask = (1<<Dim)-1;
	SparseNodeData< DualPointInfoBrood< Dim , Real , T , PointD > , ZeroUIntPack< Dim > > _iData;
	DualPointInfo< Dim , Real , T , PointD >& __iData( size_t pointIdx ){ return _iData[ (int)(pointIdx>>Dim) ][ pointIdx & _Mask ]; }
	const DualPointInfo< Dim , Real , T , PointD >& __iData( size_t pointIdx ) const { return _iData[ (int)(pointIdx>>Dim) ][ pointIdx & _Mask ]; }
	bool _constrainsDCTerm;
	ConstraintDual _constraintDual;
	SystemDual _systemDual;
	friend class FEMTree< Dim , Real >;
};
// Child-point variant that additionally carries per-point auxiliary Data
// (same packed (node<<Dim)|child indexing as ApproximateChildPointInterpolationInfo).
template< typename T , typename Data , unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct ApproximateChildPointAndDataInterpolationInfo : public InterpolationInfo< T , PointD >
{
	void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const { int idx = _iData.index( node ); if( idx<0 ) begin = end = 0; else begin = (idx<<Dim) , end = (idx<<Dim) | _iData[idx].size(); }
	bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
	const DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIdx ) const { return __iData(pointIdx).pointInfo; }
	Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( __iData(pointIdx).pointInfo.position ,
	__iData(pointIdx).data ); }
	Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( __iData(pointIdx).pointInfo.position , __iData(pointIdx).data , dValues ); }
	Point< double , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< double , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( __iData(pointIdx).pointInfo.position , __iData(pointIdx).data , dValues ); }
	ApproximateChildPointAndDataInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _constraintDual( constraintDual ) , _systemDual( systemDual ) , _constrainsDCTerm( constrainsDCTerm ) { }
protected:
	static const unsigned int _Mask = (1<<Dim)-1;
	SparseNodeData< DualPointAndDataInfoBrood< Dim , Real , Data , T , PointD > , ZeroUIntPack< Dim > > _iData;
	DualPointAndDataInfo< Dim , Real , Data , T , PointD >& __iData( size_t pointIdx ){ return _iData[ (int)(pointIdx>>Dim) ][ pointIdx & _Mask ]; }
	const DualPointAndDataInfo< Dim , Real , Data , T , PointD >& __iData( size_t pointIdx ) const { return _iData[ (int)(pointIdx>>Dim) ][ pointIdx & _Mask ]; }
	bool _constrainsDCTerm;
	ConstraintDual _constraintDual;
	SystemDual _systemDual;
	friend class FEMTree< Dim , Real >;
};
// double specialization of the child point+data variant.
template< typename Data , unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct ApproximateChildPointAndDataInterpolationInfo< double , Data , PointD , ConstraintDual , SystemDual > : public InterpolationInfo< double , PointD >
{
	typedef double T;
	void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const { int idx = _iData.index( node ); if( idx<0 ) begin = end = 0; else begin = (idx<<Dim) , end = (idx<<Dim) | _iData[idx].size(); }
	bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
	const DualPointInfo< Dim , Real , T , PointD >& operator[](
	size_t pointIdx ) const { return __iData(pointIdx).pointInfo; }
	Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( __iData(pointIdx).pointInfo.position , __iData(pointIdx).data ); }
	Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( __iData(pointIdx).pointInfo.position , __iData(pointIdx).data , dValues ); }
	ApproximateChildPointAndDataInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _constraintDual( constraintDual ) , _systemDual( systemDual ) , _constrainsDCTerm( constrainsDCTerm ) { }
protected:
	static const unsigned int _Mask = (1<<Dim)-1;
	SparseNodeData< DualPointAndDataInfoBrood< Dim , Real , Data , T , PointD > , ZeroUIntPack< Dim > > _iData;
	DualPointAndDataInfo< Dim , Real , Data , T , PointD >& __iData( size_t pointIdx ){ return _iData[ (int)(pointIdx>>Dim) ][ pointIdx & _Mask ]; }
	const DualPointAndDataInfo< Dim , Real , Data , T , PointD >& __iData( size_t pointIdx ) const { return _iData[ (int)(pointIdx>>Dim) ][ pointIdx & _Mask ]; }
	bool _constrainsDCTerm;
	ConstraintDual _constraintDual;
	SystemDual _systemDual;
	friend class FEMTree< Dim , Real >;
};
// Exact interpolation: every sample point is kept (no per-node aggregation).
// Points live in a flat _iData vector; _sampleSpan maps a node's global index to
// its [first,second) span of sample indices.  _init() is defined out-of-line.
template< typename T , unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct ExactPointInterpolationInfo : public InterpolationInfo< T , PointD >
{
	void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const { begin = _sampleSpan[ node->nodeData.nodeIndex ].first , end = _sampleSpan[ node->nodeData.nodeIndex ].second; }
	bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
	const DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIdx ) const { return _iData[ (int)pointIdx ]; }
	Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return
	_constraintDual( _iData[ (int)pointIdx ].position ); }
	Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].position , dValues ); }
	Point< double , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< double , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].position , dValues ); }
	ExactPointInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _constraintDual( constraintDual ) , _systemDual( systemDual ) , _constrainsDCTerm( constrainsDCTerm ) { }
protected:
	// Populates _sampleSpan/_iData from the tree's samples (called by the Initialize* factory).
	void _init( const class FEMTree< Dim , Real >& tree , const std::vector< PointSample >& samples , bool noRescale );
	std::vector< std::pair< int , int > > _sampleSpan;
	std::vector< DualPointInfo< Dim , Real , T , PointD > > _iData;
	bool _constrainsDCTerm;
	ConstraintDual _constraintDual;
	SystemDual _systemDual;
	friend class FEMTree< Dim , Real >;
};
// double specialization of the exact-point variant.
template< unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct ExactPointInterpolationInfo< double , PointD , ConstraintDual , SystemDual > : public InterpolationInfo< double , PointD >
{
	typedef double T;
	void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const { begin = _sampleSpan[ node->nodeData.nodeIndex ].first , end = _sampleSpan[ node->nodeData.nodeIndex ].second; }
	bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
	const DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIdx ) const { return _iData[ (int)pointIdx ]; }
	Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( _iData[ (int)pointIdx ].position ); }
	Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T ,
	CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].position , dValues ); }
	ExactPointInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _constraintDual( constraintDual ) , _systemDual( systemDual ) , _constrainsDCTerm( constrainsDCTerm ) { }
protected:
	void _init( const class FEMTree< Dim , Real >& tree , const std::vector< PointSample >& samples , bool noRescale );
	std::vector< std::pair< int , int > > _sampleSpan;
	std::vector< DualPointInfo< Dim , Real , T , PointD > > _iData;
	bool _constrainsDCTerm;
	ConstraintDual _constraintDual;
	SystemDual _systemDual;
	friend class FEMTree< Dim , Real >;
};
// Common state/initialization for the exact point+data variants; the derived
// ExactPointAndDataInterpolationInfo templates supply the virtual overrides.
template< typename T , typename Data , unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct _ExactPointAndDataInterpolationInfo : public InterpolationInfo< T , PointD >
{
	_ExactPointAndDataInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _constraintDual( constraintDual ) , _systemDual( systemDual ) , _constrainsDCTerm( constrainsDCTerm ) { }
protected:
	void _init( const class FEMTree< Dim , Real >& tree , const std::vector< PointSample >& samples , ConstPointer( Data ) sampleData , bool noRescale );
	std::vector< std::pair< int , int > > _sampleSpan;
	std::vector< DualPointAndDataInfo< Dim , Real , Data , T , PointD > > _iData;
	bool _constrainsDCTerm;
	ConstraintDual _constraintDual;
	SystemDual _systemDual;
	friend class FEMTree< Dim , Real >;
};
// Derived exact point+data info; using-declarations pull the members of the
// dependent base into scope for unqualified use below.
template< typename T , typename Data , unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct ExactPointAndDataInterpolationInfo : public _ExactPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >
{
	using _ExactPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >::_sampleSpan;
	using _ExactPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual
	>::_constrainsDCTerm;
	using _ExactPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >::_iData;
	using _ExactPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >::_constraintDual;
	using _ExactPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >::_systemDual;
	void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const { begin = _sampleSpan[ node->nodeData.nodeIndex ].first , end = _sampleSpan[ node->nodeData.nodeIndex ].second; }
	bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
	const DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIdx ) const { return _iData[ (int)pointIdx ].pointInfo; }
	// NOTE(review): first subscript lacks the (int) cast used by every sibling -- cosmetic inconsistency, behavior unchanged.
	Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( _iData[ pointIdx ].pointInfo.position , _iData[ (int)pointIdx ].data ); }
	Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].pointInfo.position , _iData[ (int)pointIdx ].data , dValues ); }
	Point< double , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< double , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].pointInfo.position , _iData[ (int)pointIdx ].data , dValues ); }
	ExactPointAndDataInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _ExactPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >( constraintDual , systemDual , constrainsDCTerm ) { }
};
// double specialization of the derived exact point+data info.
template< typename Data , unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct ExactPointAndDataInterpolationInfo< double , Data , PointD , ConstraintDual , SystemDual > : public _ExactPointAndDataInterpolationInfo< double ,
	Data , PointD , ConstraintDual , SystemDual >
{
	using _ExactPointAndDataInterpolationInfo< double , Data , PointD , ConstraintDual , SystemDual >::_sampleSpan;
	using _ExactPointAndDataInterpolationInfo< double , Data , PointD , ConstraintDual , SystemDual >::_constrainsDCTerm;
	using _ExactPointAndDataInterpolationInfo< double , Data , PointD , ConstraintDual , SystemDual >::_iData;
	// NOTE(review): unlike the primary template, no using-declarations for _constraintDual/_systemDual,
	// yet both are used unqualified below; relies on permissive dependent-base lookup -- TODO confirm on strict compilers.
	void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const { begin = _sampleSpan[ node->nodeData.nodeIndex ].first , end = _sampleSpan[ node->nodeData.nodeIndex ].second; }
	bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
	const DualPointInfo< Dim , Real , double , PointD >& operator[]( size_t pointIdx ) const { return _iData[ (int)pointIdx ].pointInfo; }
	Point< double , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( _iData[ pointIdx ].pointInfo.position , _iData[ (int)pointIdx ].data ); }
	Point< double , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< double , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].pointInfo.position , _iData[ (int)pointIdx ].data , dValues ); }
	ExactPointAndDataInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _ExactPointAndDataInterpolationInfo< double , Data , PointD , ConstraintDual , SystemDual >( constraintDual , systemDual , constrainsDCTerm ) { }
};
// Factory: builds an approximate interpolation info by densifying the samples into
// per-node dual constraints (ownership of the returned pointer passes to the caller).
template< typename T , unsigned int PointD , typename ConstraintDual , typename SystemDual >
static ApproximatePointInterpolationInfo< T , PointD , ConstraintDual , SystemDual >* InitializeApproximatePointInterpolationInfo( const class FEMTree< Dim , Real >& tree , const std::vector< PointSample >& samples , ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm , int adaptiveExponent )
{
	ApproximatePointInterpolationInfo< T , PointD ,
	ConstraintDual , SystemDual >* a = new ApproximatePointInterpolationInfo< T , PointD , ConstraintDual , SystemDual >( constraintDual , systemDual , constrainsDCTerm );
	a->_iData = tree._densifyInterpolationInfoAndSetDualConstraints< T , PointD >( samples , constraintDual , adaptiveExponent );
	return a;
}
// Factory: approximate point+data variant.
template< typename T , typename Data , unsigned int PointD , typename ConstraintDual , typename SystemDual >
static ApproximatePointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >* InitializeApproximatePointAndDataInterpolationInfo( const class FEMTree< Dim , Real >& tree , const std::vector< PointSample >& samples , ConstPointer( Data ) sampleData , ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm , int adaptiveExponent )
{
	ApproximatePointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >* a = new ApproximatePointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >( constraintDual , systemDual , constrainsDCTerm );
	a->_iData = tree._densifyInterpolationInfoAndSetDualConstraints< T , Data , PointD >( samples , sampleData , constraintDual , adaptiveExponent );
	return a;
}
// Factory: approximate child-point (brood) variant.
template< typename T , unsigned int PointD , typename ConstraintDual , typename SystemDual >
static ApproximateChildPointInterpolationInfo< T , PointD , ConstraintDual , SystemDual >* InitializeApproximateChildPointInterpolationInfo( const class FEMTree< Dim , Real >& tree , const std::vector< PointSample >& samples , ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm , bool noRescale )
{
	ApproximateChildPointInterpolationInfo< T , PointD , ConstraintDual , SystemDual >* a = new ApproximateChildPointInterpolationInfo< T , PointD , ConstraintDual , SystemDual >( constraintDual , systemDual , constrainsDCTerm );
	a->_iData = tree._densifyChildInterpolationInfoAndSetDualConstraints< T , PointD >( samples , constraintDual , noRescale );
	return a;
}
template< typename T ,
// Factory: approximate child point+data variant.
typename Data , unsigned int PointD , typename ConstraintDual , typename SystemDual >
static ApproximateChildPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >* InitializeApproximateChildPointAndDataInterpolationInfo( const class FEMTree< Dim , Real >& tree , const std::vector< PointSample >& samples , ConstPointer( Data ) sampleData , ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm , bool noRescale )
{
	ApproximateChildPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >* a = new ApproximateChildPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >( constraintDual , systemDual , constrainsDCTerm );
	a->_iData = tree._densifyChildInterpolationInfoAndSetDualConstraints< T , Data , PointD >( samples , sampleData , constraintDual , noRescale );
	return a;
}
// Factory: exact-point variant (delegates sample layout to _init).
template< typename T , unsigned int PointD , typename ConstraintDual , typename SystemDual >
static ExactPointInterpolationInfo< T , PointD , ConstraintDual , SystemDual >* InitializeExactPointInterpolationInfo( const class FEMTree< Dim , Real >& tree , const std::vector< PointSample >& samples , ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm , bool noRescale )
{
	ExactPointInterpolationInfo< T , PointD , ConstraintDual , SystemDual >* e = new ExactPointInterpolationInfo< T , PointD , ConstraintDual , SystemDual >( constraintDual , systemDual , constrainsDCTerm );
	e->_init( tree , samples , noRescale );
	return e;
}
// Factory: exact point+data variant.
template< typename T , typename Data , unsigned int PointD , typename ConstraintDual , typename SystemDual >
static ExactPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >* InitializeExactPointAndDataInterpolationInfo( const class FEMTree< Dim , Real >& tree , const std::vector< PointSample >& samples , ConstPointer( Data ) sampleData , ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm , bool noRescale )
{
	ExactPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >* e = new ExactPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >( constraintDual , systemDual , constrainsDCTerm );
	e->_init( tree , samples , sampleData , noRescale );
	return e;
}
// The exact-info structs call back into protected FEMTree state during _init.
template< typename T , unsigned int PointD , typename ConstraintDual , typename SystemDual > friend struct ExactPointInterpolationInfo;
template< typename T , typename Data , unsigned int PointD , typename ConstraintDual , typename SystemDual > friend struct ExactPointAndDataInterpolationInfo;
// Variadic OR over a pack of interpolation infos: true if any non-null info constrains the DC term.
template< typename T , unsigned int PointD , unsigned int ... PointDs > static bool ConstrainsDCTerm( const InterpolationInfo< T , PointD >* iInfo , const InterpolationInfo< T , PointDs >* ... iInfos ){ return ConstrainsDCTerm( iInfo ) || ConstrainsDCTerm( iInfos... ); }
template< typename T , unsigned int PointD > static bool ConstrainsDCTerm( const InterpolationInfo< T , PointD >* iInfo ){ return iInfo && iInfo->constrainsDCTerm(); }
static bool ConstrainsDCTerm( void ){ return false; }
#ifdef SHOW_WARNINGS
#pragma message( "[WARNING] This should not be isotropic" )
#endif // SHOW_WARNINGS
// Sparse per-node density estimate, keyed by an isotropic signature of DensityDegree;
// coDimension records the co-dimension of the sample set, kernelDepth the splatting depth.
template< unsigned int DensityDegree >
struct DensityEstimator : public SparseNodeData< Real , IsotropicUIntPack< Dim , FEMDegreeAndBType< DensityDegree >::Signature > >
{
	DensityEstimator( int kernelDepth , int coDimension ) : _kernelDepth( kernelDepth ) , _coDimension( coDimension ){ ; }
	int coDimension( void ) const { return _coDimension; }
	int kernelDepth( void ) const { return _kernelDepth; }
protected:
	int _kernelDepth , _coDimension;
};
protected:
// Node-flag predicates: a node is valid when it is not a ghost and carries the matching flag bit.
bool _isValidSpaceNode( const FEMTreeNode* node ) const { return !GetGhostFlag< Dim >( node ) && ( node->nodeData.flags & FEMTreeNodeData::SPACE_FLAG ); }
bool _isValidFEM1Node ( const FEMTreeNode* node ) const { return !GetGhostFlag< Dim >( node ) && ( node->nodeData.flags & FEMTreeNodeData::FEM_FLAG_1 ); }
bool _isValidFEM2Node ( const
FEMTreeNode* node ) const { return !GetGhostFlag< Dim >( node ) && ( node->nodeData.flags & FEMTreeNodeData::FEM_FLAG_2 ); }
bool _isRefinableNode ( const FEMTreeNode* node ) const { return !GetGhostFlag< Dim >( node ) && ( node->nodeData.flags & FEMTreeNodeData::REFINABLE_FLAG ); }
// Core tree state.
FEMTreeNode* _tree;
FEMTreeNode* _spaceRoot;
SortedTreeNodes< Dim > _sNodes;
LocalDepth _maxDepth;
// Offset between global (stored) depths and local (user-facing) depths.
int _depthOffset;
mutable unsigned int _femSigs1[ Dim ];
mutable unsigned int _femSigs2[ Dim ];
mutable unsigned int _refinableSigs[ Dim ];
static bool _InBounds( Point< Real , Dim > p );
// Depth/offset coordinate conversions between local and global frames.
int _localToGlobal( LocalDepth d ) const { return d + _depthOffset; }
LocalDepth _localDepth( const FEMTreeNode* node ) const { return node->depth() - _depthOffset; }
int _localInset( LocalDepth d ) const { return _depthOffset<=1 ? 0 : 1<<( d + _depthOffset - 1 ); }
// NOTE(review): the loop variable below shadows the LocalDepth& parameter d -- intentional-looking but easy to misread.
void _localDepthAndOffset( const FEMTreeNode* node , LocalDepth& d , LocalOffset& off ) const { node->depthAndOffset( d , off ) ; d -= _depthOffset; int inset = _localInset( d ); for( int d=0 ; d<Dim ; d++ ) off[d] -= inset; }
template< unsigned int FEMSig > static int _BSplineBegin( LocalDepth depth ){ return BSplineEvaluationData< FEMSig >::Begin( depth ); }
template< unsigned int FEMSig > static int _BSplineEnd ( LocalDepth depth ){ return BSplineEvaluationData< FEMSig >::End ( depth ); }
// A null node is treated as out of bounds.
template< unsigned int ... FEMSigs > bool _outOfBounds( UIntPack< FEMSigs ... > , const FEMTreeNode* node ) const { if( !node ) return true; LocalDepth d ; LocalOffset off ; _localDepthAndOffset( node , d , off ); return FEMIntegrator::IsOutOfBounds( UIntPack< FEMSigs ...
>() , d , off ); }
// Sorted-node index ranges per depth (and per slice within a depth).
int _sNodesBegin( LocalDepth d ) const { return _sNodes.begin( _localToGlobal( d ) ); }
int _sNodesEnd ( LocalDepth d ) const { return _sNodes.end ( _localToGlobal( d ) ); }
int _sNodesSize ( LocalDepth d ) const { return _sNodes.size ( _localToGlobal( d ) ); }
int _sNodesBeginSlice( LocalDepth d ) const { return _localInset(d); }
int _sNodesEndSlice( LocalDepth d ) const{ return ( 1<<_localToGlobal(d) ) - _localInset(d) - 1; }
int _sNodesBegin( LocalDepth d , int slice ) const { return _sNodes.begin( _localToGlobal( d ) , slice + _localInset( d ) ); }
int _sNodesEnd ( LocalDepth d , int slice ) const { return _sNodes.end ( _localToGlobal( d ) , slice + _localInset( d ) ); }
int _sNodesSize ( LocalDepth d , int slice ) const { return _sNodes.size ( _localToGlobal( d ) , slice + _localInset( d ) ); }
// True if the B-spline of FEMDegree at (depth,off) is supported strictly in the interior along every axis.
template< unsigned int FEMDegree > static bool _IsInteriorlySupported( LocalDepth depth , const LocalOffset off )
{
	if( depth>=0 )
	{
		int begin , end;
		BSplineSupportSizes< FEMDegree >::InteriorSupportedSpan( depth , begin , end );
		bool interior = true;
		for( int dd=0 ; dd<Dim ; dd++ ) interior &= off[dd]>=begin && off[dd]<end;
		return interior;
	}
	else return false;
}
template< unsigned int FEMDegree > bool _isInteriorlySupported( const FEMTreeNode* node ) const { if( !node ) return false; LocalDepth d ; LocalOffset off; _localDepthAndOffset( node , d , off ); return _IsInteriorlySupported< FEMDegree >( d , off ); }
template< unsigned int ... FEMDegrees > static bool _IsInteriorlySupported( UIntPack< FEMDegrees ... > , LocalDepth depth , const LocalOffset off ){ return BaseFEMIntegrator::IsInteriorlySupported( UIntPack< FEMDegrees ... >() , depth , off ); }
template< unsigned int ... FEMDegrees > bool _isInteriorlySupported( UIntPack< FEMDegrees ... > , const FEMTreeNode* node ) const { if( !node ) return false; LocalDepth d ; LocalOffset off ; _localDepthAndOffset( node , d , off ); return _IsInteriorlySupported< FEMDegrees ...
>( UIntPack< FEMDegrees ... >() , d , off ); } template< unsigned int FEMDegree1 , unsigned int FEMDegree2 > static bool _IsInteriorlyOverlapped( LocalDepth depth , const LocalOffset off ) { if( depth>=0 ) { int begin , end; BSplineIntegrationData< FEMDegreeAndBType< FEMDegree1 , BOUNDARY_NEUMANN >::Signature , FEMDegreeAndBType< FEMDegree2 , BOUNDARY_NEUMANN >::Signature >::InteriorOverlappedSpan( depth , begin , end ); bool interior = true; for( int dd=0 ; dd<Dim ; dd++ ) interior &= off[dd]>=begin && off[dd]<end; return interior; } else return false; } template< unsigned int FEMDegree1 , unsigned int FEMDegree2 > bool _isInteriorlyOverlapped( const FEMTreeNode* node ) const { if( !node ) return false; LocalDepth d ; LocalOffset off; _localDepthAndOffset( node , d , off ); return _IsInteriorlyOverlapped< FEMDegree1 , FEMDegree2 >( d , off ); } template< unsigned int ... FEMDegrees1 , unsigned int ... FEMDegrees2 > static bool _IsInteriorlyOverlapped( UIntPack< FEMDegrees1 ... > , UIntPack< FEMDegrees2 ... > , LocalDepth depth , const LocalOffset off ){ return BaseFEMIntegrator::IsInteriorlyOverlapped( UIntPack< FEMDegrees1 ... >() , UIntPack< FEMDegrees2 ... >() , depth , off ); } template< unsigned int ... FEMDegrees1 , unsigned int ... FEMDegrees2 > bool _isInteriorlyOverlapped( UIntPack< FEMDegrees1 ... > , UIntPack< FEMDegrees2 ... > , const FEMTreeNode* node ) const { if( !node ) return false; LocalDepth d ; LocalOffset off ; _localDepthAndOffset( node , d , off ); return _IsInteriorlyOverlapped( UIntPack< FEMDegrees1 ... >() , UIntPack< FEMDegrees2 ... 
>() , d , off ); }
// Corner (start) position and cell width of a node in the unit cube; handles
// negative (virtual) depths by growing the width instead of shrinking it.
void _startAndWidth( const FEMTreeNode* node , Point< Real , Dim >& start , Real& width ) const { LocalDepth d ; LocalOffset off; _localDepthAndOffset( node , d , off ); if( d>=0 ) width = Real( 1.0 / (1<< d ) ); else width = Real( 1.0 * (1<<(-d)) ); for( int dd=0 ; dd<Dim ; dd++ ) start[dd] = Real( off[dd] ) * width; }
// Center position and cell width of a node.
// NOTE(review): unlike _startAndWidth this does not handle d<0 (1<<d shifts by a
// negative amount) -- presumably only ever called on nodes at depth>=0; confirm.
void _centerAndWidth( const FEMTreeNode* node , Point< Real , Dim >& center , Real& width ) const { int d , off[Dim]; _localDepthAndOffset( node , d , off ); width = Real( 1.0 / (1<<d) ); for( int dd=0 ; dd<Dim ; dd++ ) center[dd] = Real( off[dd] + 0.5 ) * width; }
// Index (bit per axis) of the child octant of node containing point p:
// bit d is set when p lies at-or-beyond the node's center along axis d.
int _childIndex( const FEMTreeNode* node , Point< Real , Dim > p ) const { Point< Real , Dim > c ; Real w; _centerAndWidth( node , c , w ); int cIdx = 0; for( int d=0 ; d<Dim ; d++ ) if( p[d]>=c[d] ) cIdx |= (1<<d); return cIdx; }
// Full-depth refinement control (definitions elsewhere).
template< unsigned int ... Degrees > void _setFullDepth( UIntPack< Degrees ... > , FEMTreeNode* node , LocalDepth depth );
template< unsigned int ... Degrees > void _setFullDepth( UIntPack< Degrees ... > , LocalDepth depth );
template< unsigned int ... Degrees > LocalDepth _getFullDepth( UIntPack< Degrees ... > , const FEMTreeNode* node ) const;
public:
template< unsigned int ... Degrees > LocalDepth getFullDepth( UIntPack< Degrees ... > ) const;
// Public depth/offset accessors (thin wrappers over the local-coordinate helpers).
LocalDepth depth( const FEMTreeNode* node ) const { return _localDepth( node ); }
void depthAndOffset( const FEMTreeNode* node , LocalDepth& depth , LocalOffset& offset ) const { _localDepthAndOffset( node , depth , offset ); }
// Public node-range accessors over the sorted-node array.
int nodesSize ( void ) const { return _sNodes.size( ); }
int nodesBegin( LocalDepth d ) const { return _sNodes.begin( _localToGlobal( d ) ); }
int nodesEnd ( LocalDepth d ) const { return _sNodes.end ( _localToGlobal( d ) ); }
int nodesSize ( LocalDepth d ) const { return _sNodes.size ( _localToGlobal( d ) ); }
int nodesBegin( LocalDepth d , int slice ) const { return _sNodes.begin( _localToGlobal( d ) , slice + _localInset( d ) ); }
int nodesEnd ( LocalDepth d , int slice ) const { return _sNodes.end ( _localToGlobal( d ) , slice + _localInset( d ) ); }
int nodesSize ( LocalDepth d , int slice ) const { return _sNodes.size ( _localToGlobal( d ) , slice + _localInset( d ) ); }
const FEMTreeNode* node( int idx ) const { return _sNodes.treeNodes[idx]; }
void centerAndWidth( int idx , Point< Real , Dim >& center , Real& width ) const { _centerAndWidth( _sNodes.treeNodes[idx] , center , width ); }
void startAndWidth( int idx , Point< Real , Dim >& center , Real& width ) const { _startAndWidth( _sNodes.treeNodes[idx] , center , width ); }
protected:
/////////////////////////////////////
// System construction code        //
// MultiGridFEMTreeData.System.inl //
/////////////////////////////////////
public:
template< unsigned int ... FEMSigs > void setMultiColorIndices( UIntPack< FEMSigs ... > , int depth , std::vector< std::vector< int > >& indices ) const;
protected:
template< unsigned int ... FEMSigs > void _setMultiColorIndices( UIntPack< FEMSigs ... > , int start , int end , std::vector< std::vector< int > >& indices ) const;
// Per-level solver timings and residual norms, filled in by the solve routines.
struct _SolverStats { double constraintUpdateTime , systemTime , solveTime; double bNorm2 , inRNorm2 , outRNorm2; };
template< unsigned int ... FEMSigs , typename T , unsigned int PointD , unsigned int ...
// _addPointValues: accumulate into the statically-sized pointValues window the
// point-interpolation contributions from each supplied InterpolationInfo.
// Variadic case: peel off the first info, then recurse on the remaining pack.
PointDs > typename std::enable_if< (sizeof...(PointDs)!=0) >::type _addPointValues( UIntPack< FEMSigs ... > , StaticWindow< Real , UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& pointValues , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , const InterpolationInfo< T , PointD >* iInfo , const InterpolationInfo< T , PointDs >* ... iInfos ) const { _addPointValues( UIntPack< FEMSigs ... >() , pointValues , neighbors , bsData , iInfo ) , _addPointValues( UIntPack< FEMSigs ... >() , pointValues , neighbors , bsData , iInfos... ); }
// Base case: no interpolation info -> nothing to accumulate.
template< unsigned int ... FEMSigs > void _addPointValues( UIntPack< FEMSigs ... > , StaticWindow< Real , UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& pointValues , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData ) const { }
// Single-info case (defined out of line).
template< unsigned int ... FEMSigs , typename T , unsigned int PointD > void _addPointValues( UIntPack< FEMSigs ... > , StaticWindow< Real , UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& pointValues , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , const InterpolationInfo< T , PointD >* interpolationInfo ) const;
template< unsigned int ... FEMSigs , typename T , unsigned int PointD , unsigned int ...
PointDs > typename std::enable_if< (sizeof...(PointDs)>1) >::type _addProlongedPointValues( UIntPack< FEMSigs ... > , WindowSlice< Real , UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > > pointValues , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& pNeighbors , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , const InterpolationInfo< T , PointD >* iInfo , const InterpolationInfo< T , PointDs >* ... iInfos ) const { _addProlongedPointValues( UIntPack< FEMSigs ... >() , pointValues , neighbors , pNeighbors , bsData , iInfo ) , _addProlongedPointValues( UIntPack< FEMSigs ... >() , pointValues , neighbors , pNeighbors , bsData , iInfos... ); } template< unsigned int ... FEMSigs > void _addProlongedPointValues( UIntPack< FEMSigs ... > , WindowSlice< Real , UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > > pointValues , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& pNeighbors , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData ) const { } template< unsigned int ... FEMSigs , typename T , unsigned int PointD > void _addProlongedPointValues( UIntPack< FEMSigs ... > , WindowSlice< Real , UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... 
> > pointValues , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& pNeighbors , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , const InterpolationInfo< T , PointD >* iInfo ) const;
// _setPointValuesFromProlongedSolution: push the prolonged (coarser-level)
// solution into each InterpolationInfo's stored point values at highDepth.
// Variadic case: handle the first info, then recurse on the remaining pack.
template< unsigned int ... FEMSigs , typename T , unsigned int PointD , unsigned int ... PointDs > typename std::enable_if< (sizeof...(PointDs)!=0) >::type _setPointValuesFromProlongedSolution( LocalDepth highDepth , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , ConstPointer( T ) prolongedSolution , InterpolationInfo< T , PointD >* iInfo , InterpolationInfo< T , PointDs >* ... iInfos ) const { _setPointValuesFromProlongedSolution( highDepth , bsData , prolongedSolution , iInfo ) , _setPointValuesFromProlongedSolution( highDepth , bsData , prolongedSolution , iInfos... ); }
// Base case: no interpolation info -> nothing to set.
template< unsigned int ... FEMSigs , typename T > void _setPointValuesFromProlongedSolution( LocalDepth highDepth , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , ConstPointer( T ) prolongedSolution ) const { }
// Single-info case (defined out of line).
template< unsigned int ... FEMSigs , typename T , unsigned int PointD > void _setPointValuesFromProlongedSolution( LocalDepth highDepth , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , ConstPointer( T ) prolongedSolution , InterpolationInfo< T , PointD >* interpolationInfo ) const;
template< unsigned int ... FEMSigs , typename T , unsigned int PointD , unsigned int ...
// _getInterpolationConstraintFromProlongedSolution: sum, over all supplied
// InterpolationInfos, the constraint at node induced by the prolonged solution.
// Variadic case: first info's contribution plus the recursion on the rest.
PointDs > typename std::enable_if< (sizeof...(PointDs)!=0) , T >::type _getInterpolationConstraintFromProlongedSolution( const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , const FEMTreeNode* node , ConstPointer( T ) prolongedSolution , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , const InterpolationInfo< T , PointD >* iInfo , const InterpolationInfo< T , PointDs >* ... iInfos ) const { return _getInterpolationConstraintFromProlongedSolution( neighbors , node , prolongedSolution , bsData , iInfo ) + _getInterpolationConstraintFromProlongedSolution( neighbors , node , prolongedSolution , bsData , iInfos... ); }
// Base case: no interpolation info -> zero (value-initialized) constraint.
template< unsigned int ... FEMSigs , typename T > T _getInterpolationConstraintFromProlongedSolution( const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , const FEMTreeNode* node , ConstPointer( T ) prolongedSolution , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData ) const { return T(); }
// Single-info case (defined out of line).
template< unsigned int ... FEMSigs , typename T , unsigned int PointD > T _getInterpolationConstraintFromProlongedSolution( const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , const FEMTreeNode* node , ConstPointer( T ) prolongedSolution , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , const InterpolationInfo< T , PointD >* iInfo ) const;
template< unsigned int ... FEMSigs , typename T , unsigned int PointD , unsigned int ... PointDs > typename std::enable_if< (sizeof...(PointDs)!=0) >::type _updateRestrictedInterpolationConstraints( const PointEvaluator< UIntPack< FEMSigs ...
> , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , LocalDepth highDepth , ConstPointer( T ) solution , Pointer( T ) cumulativeConstraints , const InterpolationInfo< T , PointD >* iInfo , const InterpolationInfo< T , PointDs >* ... iInfos ) const { _updateRestrictedInterpolationConstraints( bsData , highDepth , solution , cumulativeConstraints , iInfo ) , _updateRestrictedInterpolationConstraints( bsData , highDepth , solution , cumulativeConstraints , iInfos... ); } template< unsigned int ... FEMSigs , typename T > void _updateRestrictedInterpolationConstraints( PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , LocalDepth highDepth , ConstPointer( T ) solution , Pointer( T ) cumulativeConstraints ) const { ; } template< unsigned int ... FEMSigs , typename T , unsigned int PointD > void _updateRestrictedInterpolationConstraints( const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , LocalDepth highDepth , ConstPointer( T ) solution , Pointer( T ) cumulativeConstraints , const InterpolationInfo< T , PointD >* interpolationInfo ) const; template< unsigned int FEMDegree1 , unsigned int FEMDegree2 > static void _SetParentOverlapBounds( const FEMTreeNode* node , int start[Dim] , int end[Dim] ); template< unsigned int FEMDegree1 , unsigned int FEMDegree2 > static void _SetParentOverlapBounds( int cIdx , int start[Dim] , int end[Dim] ); template< unsigned int ... FEMDegrees1 , unsigned int ... FEMDegrees2 > static void _SetParentOverlapBounds( UIntPack< FEMDegrees1 ... > , UIntPack< FEMDegrees2 ... > , const FEMTreeNode* node , int start[Dim] , int end[Dim] ) { if( node ) { int d , off[Dim] ; node->depthAndOffset( d , off ); BaseFEMIntegrator::template ParentOverlapBounds( UIntPack< FEMDegrees1 ... >() , UIntPack< FEMDegrees2 ... >() , d , off , start , end ); } } template< unsigned int ... FEMDegrees1 , unsigned int ... 
FEMDegrees2 > static void _SetParentOverlapBounds( UIntPack< FEMDegrees1 ... > , UIntPack< FEMDegrees2 ... > , int cIdx , int start[Dim] , int end[Dim] ) { BaseFEMIntegrator::template ParentOverlapBounds( UIntPack< FEMDegrees1 ... >() , UIntPack< FEMDegrees2 ... >() , cIdx , start , end ); }
// Number of matrix entries contributed by node against its parent-level neighbors.
template< unsigned int ... FEMSigs > int _getProlongedMatrixRowSize( const FEMTreeNode* node , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& pNeighbors ) const;
// gcc<5 workaround: that compiler needs the extra UIntPack tag argument to
// disambiguate the template; other compilers use the tag-free signature.
#if defined( __GNUC__ ) && __GNUC__ < 5
#warning "you've got me gcc version<5"
template< unsigned int ... FEMSigs > int _getMatrixRowSize( UIntPack< FEMSigs ... > , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors ) const;
#else // !__GNUC__ || __GNUC__ >=5
template< unsigned int ... FEMSigs > int _getMatrixRowSize( const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors ) const;
#endif // defined( __GNUC__ ) && __GNUC__ < 5
// Fill a matrix row for node and return the constraint induced by the prolonged
// solution (declaration continues on the next line).
template< typename T , unsigned int ... PointDs , unsigned int ... FEMSigs > T _setMatrixRowAndGetConstraintFromProlongation( UIntPack< FEMSigs ... > , const BaseSystem< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& pNeighbors , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , Pointer( MatrixEntry< Real > ) row , int offset , const PCStencils< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& pcStencils , const CCStencil< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& ccStencil , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ...
> >& bsData , ConstPointer( T ) prolongedSolution , const InterpolationInfo< T , PointDs >* ... interpolationInfo ) const;
// Fill the row of entries coupling node to its parent-level neighbors; returns
// the number of entries written.
template< typename T , unsigned int ... PointDs , unsigned int ... FEMSigs > int _setProlongedMatrixRow( const typename BaseFEMIntegrator::System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& pNeighbors , Pointer( MatrixEntry< Real > ) row , int offset , const DynamicWindow< double , UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& stencil , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , const InterpolationInfo< T , PointDs >* ... interpolationInfo ) const;
// Updates the constraints @(depth) based on the solution coefficients @(depth-1)
template< unsigned int ... FEMSigs , typename T , unsigned int ... PointDs > T _getConstraintFromProlongedSolution( UIntPack< FEMSigs ... > , const BaseSystem< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& pNeighbors , const FEMTreeNode* node , ConstPointer( T ) prolongedSolution , const DynamicWindow< double , UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& stencil , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , const InterpolationInfo< T , PointDs >* ... interpolationInfo ) const;
template< unsigned int ...
FEMSigs , typename T , typename TDotT , typename SORWeights , unsigned int ... PointDs > int _solveFullSystemGS( UIntPack< FEMSigs ... > , const typename BaseFEMIntegrator::System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , LocalDepth depth , Pointer( T ) solution , ConstPointer( T ) prolongedSolution , ConstPointer( T ) constraints , TDotT Dot , int iters , bool coarseToFine , SORWeights sorWeights , _SolverStats& stats , bool computeNorms , const InterpolationInfo< T , PointDs >* ... interpolationInfo ) const;
// Gauss-Seidel relaxation at a single depth, over the whole depth (above) or
// slice-by-slice (below); both defined out of line.
template< unsigned int ... FEMSigs , typename T , typename TDotT , typename SORWeights , unsigned int ... PointDs > int _solveSlicedSystemGS( UIntPack< FEMSigs ... > , const typename BaseFEMIntegrator::System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , LocalDepth depth , Pointer( T ) solution , ConstPointer( T ) prolongedSolution , ConstPointer( T ) constraints , TDotT Dot , int iters , bool coarseToFine , unsigned int sliceBlockSize , SORWeights sorWeights , _SolverStats& stats , bool computeNorms , const InterpolationInfo< T , PointDs >* ... interpolationInfo ) const;
// Dispatcher: forwards to the sliced or the full Gauss-Seidel solver (the full
// solver ignores sliceBlockSize).
template< unsigned int ... FEMSigs , typename T , typename TDotT , typename SORWeights , unsigned int ... PointDs > int _solveSystemGS( UIntPack< FEMSigs ... > , bool sliced , const typename BaseFEMIntegrator::System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , LocalDepth depth , Pointer( T ) solution , ConstPointer( T ) prolongedSolution , ConstPointer( T ) constraints , TDotT Dot , int iters , bool coarseToFine , unsigned int sliceBlockSize , SORWeights sorWeights , _SolverStats& stats , bool computeNorms , const InterpolationInfo< T , PointDs >* ... interpolationInfo ) const { if( sliced ) return _solveSlicedSystemGS( UIntPack< FEMSigs ... >() , F , bsData , depth , solution , prolongedSolution , constraints , Dot , iters , coarseToFine , sliceBlockSize , sorWeights , stats , computeNorms , interpolationInfo ... ); else return _solveFullSystemGS ( UIntPack< FEMSigs ... >() , F , bsData , depth , solution , prolongedSolution , constraints , Dot , iters , coarseToFine , sorWeights , stats , computeNorms , interpolationInfo ... ); }
// Conjugate-gradient solve at a single depth.
template< unsigned int ... FEMSigs , typename T , typename TDotT , unsigned int ... PointDs > int _solveSystemCG( UIntPack< FEMSigs ... > , const typename BaseFEMIntegrator::System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , LocalDepth depth , Pointer( T ) solution , ConstPointer( T ) prolongedSolution , ConstPointer( T ) constraints , TDotT Dot , int iters , bool coarseToFine , _SolverStats& stats , bool computeNorms , double cgAccuracy , const InterpolationInfo< T , PointDs >* ... interpolationInfo ) const;
// Regular (non-adaptive) multigrid V-cycle solve.
template< unsigned int ... FEMSigs , typename T , typename TDotT , unsigned int ... PointDs > void _solveRegularMG( UIntPack< FEMSigs ... > , typename BaseFEMIntegrator::System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , LocalDepth depth , Pointer( T ) solution , ConstPointer( T ) constraints , TDotT Dot , int vCycles , int iters , _SolverStats& stats , bool computeNorms , double cgAccuracy , const InterpolationInfo< T , PointDs >* ...
interpolationInfo ) const;
// Updates the cumulative integral constraints @(depth-1) based on the change in solution coefficients @(depth)
template< unsigned int ... FEMSigs , typename T > void _updateRestrictedIntegralConstraints( UIntPack< FEMSigs ... > , const typename BaseFEMIntegrator::System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , LocalDepth highDepth , ConstPointer( T ) solution , Pointer( T ) cumulativeConstraints ) const;
// Function (and derivative) values at p from the coarser / finer level's
// coefficients relative to node.
template< unsigned int PointD , typename T , unsigned int ... FEMSigs > CumulativeDerivativeValues< T , Dim , PointD > _coarserFunctionValues( UIntPack< FEMSigs ... > , Point< Real , Dim > p , const ConstPointSupportKey< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& neighborKey , const FEMTreeNode* node , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , ConstPointer( T ) coefficients ) const;
template< unsigned int PointD , typename T , unsigned int ... FEMSigs > CumulativeDerivativeValues< T , Dim , PointD > _finerFunctionValues( UIntPack< FEMSigs ... > , Point< Real , Dim > p , const ConstPointSupportKey< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& neighborKey , const FEMTreeNode* node , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , ConstPointer( T ) coefficients ) const;
// Build the system matrix (and diagonal / prolongation constraints) for the
// node range [nBegin,nEnd) at depth.
template< unsigned int ... FEMSigs , typename T , unsigned int ... PointDs > int _getSliceMatrixAndProlongationConstraints( UIntPack< FEMSigs ... > , const BaseSystem< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , SparseMatrix< Real >& matrix , Pointer( Real ) diagonalR , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , LocalDepth depth , int nBegin , int nEnd , ConstPointer( T ) prolongedSolution , Pointer( T ) constraints , const CCStencil < UIntPack< FEMSignature< FEMSigs >::Degree ... > >& ccStencil , const PCStencils< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& pcStencils , const InterpolationInfo< T , PointDs >* ... interpolationInfo ) const;
// Down samples constraints @(depth) to constraints @(depth-1)
template< class C , unsigned ... Degrees , unsigned int ... FEMSigs > void _downSample( UIntPack< FEMSigs ... > , typename BaseFEMIntegrator::template RestrictionProlongation< UIntPack< Degrees ... > >& RP , LocalDepth highDepth , Pointer( C ) constraints ) const;
// Up samples coefficients @(depth-1) to coefficients @(depth)
template< class C , unsigned ... Degrees , unsigned int ... FEMSigs > void _upSample( UIntPack< FEMSigs ... > , typename BaseFEMIntegrator::template RestrictionProlongation< UIntPack< Degrees ... > >& RP , LocalDepth highDepth , Pointer( C ) coefficients ) const;
// Up-sample regular-grid coefficients from depth-1 to depth, over the whole
// grid or over explicit [begin,end) index ranges.
template< bool XMajor , class C , unsigned int ... FEMSigs > static void _RegularGridUpSample( UIntPack< FEMSigs ... > , LocalDepth highDepth , ConstPointer( C ) lowCoefficients , Pointer( C ) highCoefficients );
template< bool XMajor , class C , unsigned int ... FEMSigs > static void _RegularGridUpSample( UIntPack< FEMSigs ... > , const int lowBegin[] , const int lowEnd[] , const int highBegin[] , const int highEnd[] , LocalDepth highDepth , ConstPointer( C ) lowCoefficients , Pointer( C ) highCoefficients );
public:
// Coarser-level representation of the given coefficients (dense or sparse input).
template< class C , unsigned int ... FEMSigs > DenseNodeData< C , UIntPack< FEMSigs ... > > coarseCoefficients( const DenseNodeData< C , UIntPack< FEMSigs ... > >& coefficients ) const;
template< class C , unsigned int ... FEMSigs > DenseNodeData< C , UIntPack< FEMSigs ... > > coarseCoefficients( const SparseNodeData< C , UIntPack< FEMSigs ...
> >& coefficients ) const;
// For each (valid) fem node, compute the ratio of the sum of active prolongation weights to the sum of total prolongation weights
// If the prolongToChildren flag is set, then these weights are pushed to the children by computing the ratio of the prolongation of the above weights to the prolongation of unity weights
template< unsigned int ... FEMSigs > DenseNodeData< Real , UIntPack< FEMSigs ... > > prolongationWeights( UIntPack< FEMSigs ... > , bool prolongToChildren ) const;
// For each (valid) fem node, compute the integral of the basis function over the valid space nodes over the integral of the basis function
template< unsigned int ... FEMSigs > DenseNodeData< Real , UIntPack< FEMSigs ... > > supportWeights( UIntPack< FEMSigs ... > ) const;
protected:
//////////////////////////////////////////////
// Code for splatting point-sample data     //
// MultiGridFEMTreeData.WeightedSamples.inl //
//////////////////////////////////////////////
// Accumulate a sample's weight contribution into the density estimator around node.
template< unsigned int WeightDegree > void _addWeightContribution( DensityEstimator< WeightDegree >& densityWeights , FEMTreeNode* node , Point< Real , Dim > position , PointSupportKey< IsotropicUIntPack< Dim , WeightDegree > >& weightKey , Real weight=Real(1.0) );
// Estimated number of samples per node at position, per the density estimator.
template< unsigned int WeightDegree , class PointSupportKey > Real _getSamplesPerNode( const DensityEstimator< WeightDegree >& densityWeights , const FEMTreeNode* node , Point< Real , Dim > position , PointSupportKey& weightKey ) const;
// Depth and weight of a sample at position (node-relative and tree-root variants).
template< unsigned int WeightDegree , class WeightKey > void _getSampleDepthAndWeight( const DensityEstimator< WeightDegree >& densityWeights , const FEMTreeNode* node , Point< Real , Dim > position , WeightKey& weightKey , Real& depth , Real& weight ) const;
template< unsigned int WeightDegree , class WeightKey > void _getSampleDepthAndWeight( const DensityEstimator< WeightDegree >& densityWeights , Point< Real , Dim > position , WeightKey& weightKey , Real& depth , Real& weight ) const;
// Splat a single sample's value v at point into the node data (optionally
// creating nodes); the density-weighted variant distributes across depths.
template< bool CreateNodes , class V , unsigned int ... DataSigs > void _splatPointData( FEMTreeNode* node , Point< Real , Dim > point , V v , SparseNodeData< V , UIntPack< DataSigs ... > >& data , PointSupportKey< UIntPack< FEMSignature< DataSigs >::Degree ... > >& dataKey );
template< bool CreateNodes , unsigned int WeightDegree , class V , unsigned int ... DataSigs > Real _splatPointData( const DensityEstimator< WeightDegree >& densityWeights , Point< Real , Dim > point , V v , SparseNodeData< V , UIntPack< DataSigs ... > >& data , PointSupportKey< IsotropicUIntPack< Dim , WeightDegree > >& weightKey , PointSupportKey< UIntPack< FEMSignature< DataSigs >::Degree ... > >& dataKey , LocalDepth minDepth , LocalDepth maxDepth , int dim , Real depthBias );
template< bool CreateNodes , unsigned int WeightDegree , class V , unsigned int ... DataSigs > Real _multiSplatPointData( const DensityEstimator< WeightDegree >* densityWeights , FEMTreeNode* node , Point< Real , Dim > point , V v , SparseNodeData< V , UIntPack< DataSigs ... > >& data , PointSupportKey< IsotropicUIntPack< Dim , WeightDegree > >& weightKey , PointSupportKey< UIntPack< FEMSignature< DataSigs >::Degree ... > >& dataKey , int dim );
template< unsigned int WeightDegree , class V , unsigned int ... DataSigs > Real _nearestMultiSplatPointData( const DensityEstimator< WeightDegree >* densityWeights , FEMTreeNode* node , Point< Real , Dim > point , V v , SparseNodeData< V , UIntPack< DataSigs ... > >& data , PointSupportKey< IsotropicUIntPack< Dim , WeightDegree > >& weightKey , int dim=Dim );
// Evaluate the function defined by coefficients at point p.
template< class V , class Coefficients , unsigned int D , unsigned int ... DataSigs > V _evaluate( const Coefficients& coefficients , Point< Real , Dim > p , const PointEvaluator< UIntPack< DataSigs ... > , IsotropicUIntPack< Dim , D > >& pointEvaluator , const ConstPointSupportKey< UIntPack< FEMSignature< DataSigs >::Degree ... > >& dataKey ) const;
public:
template< bool XMajor, class V, unsigned int ...
// Evaluate the coefficients on a regular grid at the requested depth (res
// receives the grid resolution); 1D-array and 2D-array output variants.
DataSigs > Pointer(V) regularGridEvaluate(const DenseNodeData< V, UIntPack< DataSigs ... > >& coefficients, int& res, LocalDepth depth = -1, bool primal = false) const;
template< bool XMajor, class V, unsigned int ... DataSigs > Pointer2(V) regularGridEvaluate_2darray(const DenseNodeData< V, UIntPack< DataSigs ... > >& coefficients, int& res, LocalDepth depth = -1, bool primal = false) const;
// Up-sample coefficients onto a regular grid, over the whole domain or over
// explicit [begin,end) index ranges.
template< bool XMajor , class V , unsigned int ... DataSigs > Pointer( V ) regularGridUpSample( const DenseNodeData< V , UIntPack< DataSigs ... > >& coefficients , LocalDepth depth=-1 ) const;
template< bool XMajor , class V , unsigned int ... DataSigs > Pointer( V ) regularGridUpSample( const DenseNodeData< V , UIntPack< DataSigs ... > >& coefficients , const int begin[Dim] , const int end[Dim] , LocalDepth depth=-1 ) const;
// Average of the represented function, over the unit cube or over [begin,end).
template< class V , unsigned int ... DataSigs > V average( const DenseNodeData< V , UIntPack< DataSigs ... > >& coefficients ) const;
template< class V , unsigned int ... DataSigs > V average( const DenseNodeData< V , UIntPack< DataSigs ... > >& coefficients , const Real begin[Dim] , const Real end[Dim] ) const;
// Predicate functor: does a subtree carry any non-zero normal data?
template< typename T > struct HasNormalDataFunctor{};
template< unsigned int ... NormalSigs > struct HasNormalDataFunctor< UIntPack< NormalSigs ... > > { const SparseNodeData< Point< Real , Dim > , UIntPack< NormalSigs ... > >& normalInfo; HasNormalDataFunctor( const SparseNodeData< Point< Real , Dim > , UIntPack< NormalSigs ...
> >& ni ) : normalInfo( ni ){ ; }
// True if this node stores a non-zero normal, or (recursively) any child does.
bool operator() ( const FEMTreeNode* node ) const { const Point< Real , Dim >* n = normalInfo( node ); if( n ) { const Point< Real , Dim >& normal = *n; for( int d=0 ; d<Dim ; d++ ) if( normal[d]!=0 ) return true; } if( node->children ) for( int c=0 ; c<(1<<Dim) ; c++ ) if( (*this)( node->children + c ) ) return true; return false; } };
// Predicate functor that keeps every node.
struct TrivialHasDataFunctor{ bool operator() ( const FEMTreeNode* node ) const { return true; } };
protected:
// [NOTE] The input/output for this method is pre-scaled by weight
template< typename T > bool _setInterpolationInfoFromChildren( FEMTreeNode* node , SparseNodeData< T , IsotropicUIntPack< Dim , FEMTrivialSignature > >& iInfo ) const;
// Densify interpolation info over the samples and set the dual constraints;
// variants with/without per-sample data and with per-node/per-child (brood) storage.
template< typename T , unsigned int PointD , typename ConstraintDual > SparseNodeData< DualPointInfo < Dim , Real , T , PointD > , IsotropicUIntPack< Dim , FEMTrivialSignature > > _densifyInterpolationInfoAndSetDualConstraints( const std::vector< PointSample >& samples , ConstraintDual constraintDual , int adaptiveExponent ) const;
template< typename T , typename Data , unsigned int PointD , typename ConstraintDual > SparseNodeData< DualPointAndDataInfo< Dim , Real , Data , T , PointD > , IsotropicUIntPack< Dim , FEMTrivialSignature > > _densifyInterpolationInfoAndSetDualConstraints( const std::vector< PointSample >& samples , ConstPointer( Data ) sampleData , ConstraintDual constraintDual , int adaptiveExponent ) const;
template< typename T , unsigned int PointD , typename ConstraintDual > SparseNodeData< DualPointInfoBrood < Dim , Real , T , PointD > , IsotropicUIntPack< Dim , FEMTrivialSignature > > _densifyChildInterpolationInfoAndSetDualConstraints( const std::vector< PointSample >& samples , ConstraintDual constraintDual , bool noRescale ) const;
template< typename T , typename Data , unsigned int PointD , typename ConstraintDual > SparseNodeData< DualPointAndDataInfoBrood< Dim , Real , Data , T , PointD > , IsotropicUIntPack< Dim , FEMTrivialSignature > > _densifyChildInterpolationInfoAndSetDualConstraints( const std::vector< PointSample >& samples , ConstPointer( Data ) sampleData , ConstraintDual constraintDual , bool noRescale ) const;
// Node validity / refinability flag maintenance.
void _setSpaceValidityFlags( void ) const;
template< unsigned int ... FEMSigs1 > void _setFEM1ValidityFlags( UIntPack< FEMSigs1 ... > ) const;
template< unsigned int ... FEMSigs2 > void _setFEM2ValidityFlags( UIntPack< FEMSigs2 ... > ) const;
template< unsigned int ... FEMSigs > void _setRefinabilityFlags( UIntPack< FEMSigs ... > ) const;
// Trim subtrees (below fullDepth) for which the functor reports no data.
template< class HasDataFunctor > void _clipTree( const HasDataFunctor& f , LocalDepth fullDepth );
public:
// Per-leaf derivative values of the function given by the coefficients.
template< unsigned int PointD , unsigned int ... FEMSigs > SparseNodeData< CumulativeDerivativeValues< Real , Dim , PointD > , IsotropicUIntPack< Dim , FEMTrivialSignature > > leafValues( const DenseNodeData< Real , UIntPack< FEMSigs ... > >& coefficients , int maxDepth=-1 ) const;
protected:
/////////////////////////////////////
// Evaluation Methods              //
// MultiGridFEMTreeData.Evaluation //
/////////////////////////////////////
static const unsigned int CHILDREN = 1<<Dim;
// Evaluation helper: caches B-spline stencils/evaluators per depth; only the
// UIntPack specialization below is ever populated.
template< typename Pack , unsigned int PointD > struct _Evaluator{ };
template< unsigned int ... FEMSigs , unsigned int PointD > struct _Evaluator< UIntPack< FEMSigs ... > , PointD > { static_assert( Dim == sizeof...(FEMSigs) , "[ERROR] Number of signatures doesn't match dimension" ); typedef DynamicWindow< CumulativeDerivativeValues< double , Dim , PointD > , UIntPack< BSplineSupportSizes< FEMSignature< FEMSigs >::Degree >::SupportSize ... > > CenterStencil; typedef DynamicWindow< CumulativeDerivativeValues< double , Dim , PointD > , UIntPack< BSplineSupportSizes< FEMSignature< FEMSigs >::Degree >::SupportSize ... > > CornerStencil; typedef DynamicWindow< CumulativeDerivativeValues< double , Dim , PointD > , UIntPack< ( BSplineSupportSizes< FEMSignature< FEMSigs >::Degree >::BCornerSize + 1 ) ...
> > BCornerStencil;
typedef std::tuple< typename BSplineEvaluationData< FEMSigs >::template Evaluator< PointD > ... > Evaluators;
typedef std::tuple< typename BSplineEvaluationData< FEMSigs >::template ChildEvaluator< PointD > ... > ChildEvaluators;
// Per-depth stencils: cc* = same-level (center/corner/boundary-corner),
// pc* = parent-to-child, indexed by child octant.
struct StencilData { CenterStencil ccCenterStencil , pcCenterStencils[CHILDREN]; CornerStencil ccCornerStencil[CHILDREN] , pcCornerStencils[CHILDREN][CHILDREN]; BCornerStencil ccBCornerStencil[CHILDREN] , pcBCornerStencils[CHILDREN][CHILDREN]; };
// Owned per-depth arrays, allocated by set() and released in the destructor.
Pointer( StencilData ) stencilData;
Pointer( Evaluators ) evaluators;
Pointer( ChildEvaluators ) childEvaluators;
void set( LocalDepth depth );
_Evaluator( void ){ _pointEvaluator = NULL ; stencilData = NullPointer( StencilData ) , evaluators = NullPointer( Evaluators ) , childEvaluators = NullPointer( ChildEvaluators ); }
~_Evaluator( void ){ if( _pointEvaluator ) delete _pointEvaluator , _pointEvaluator = NULL ; if( stencilData ) DeletePointer( stencilData ) ; if( evaluators ) DeletePointer( evaluators ) ; if( childEvaluators ) DeletePointer( childEvaluators ); }
protected:
// Per-axis sampling position relative to a cell: its center or a back/front corner.
enum _CenterOffset{ CENTER=-1 , BACK=0 , FRONT=1 };
template< unsigned int _PointD=PointD > CumulativeDerivativeValues< double , Dim , _PointD > _values( unsigned int d , const int fIdx[Dim] , const int idx[Dim] , const _CenterOffset off[Dim] , bool parentChild ) const;
template< unsigned int _PointD=PointD > CumulativeDerivativeValues< double , Dim , _PointD > _centerValues( unsigned int d , const int fIdx[Dim] , const int idx[Dim] , bool parentChild ) const;
template< unsigned int _PointD=PointD > CumulativeDerivativeValues< double , Dim , _PointD > _cornerValues( unsigned int d , const int fIdx[Dim] , const int idx[Dim] , int corner , bool parentChild ) const;
// _setDValues: compile-time recursion over the Dim axes, filling per-axis,
// per-derivative B-spline values; I==Dim terminates the recursion.
template< unsigned int _PointD=PointD , unsigned int I=0 > typename std::enable_if< I==Dim >::type _setDValues( unsigned int d , const int fIdx[] , const int cIdx[] , const _CenterOffset off[] , bool pc , double dValues[][_PointD+1] ) const{ }
// Recursive case: pc selects the parent-to-child evaluator; off[I]==CENTER uses
// the center value, otherwise the corner value shifted by off[I].
template< unsigned int _PointD=PointD , unsigned int I=0 > typename std::enable_if< I< Dim >::type _setDValues( unsigned int d , const int fIdx[] , const int cIdx[] , const _CenterOffset off[] , bool pc , double dValues[][_PointD+1] ) const { if( pc ) for( int dd=0 ; dd<=_PointD ; dd++ ) dValues[I][dd] = off[I]==CENTER ? std::get< I >( childEvaluators[d] ).centerValue( fIdx[I] , cIdx[I] , dd ) : std::get< I >( childEvaluators[d] ).cornerValue( fIdx[I] , cIdx[I]+off[I] , dd ); else for( int dd=0 ; dd<=_PointD ; dd++ ) dValues[I][dd] = off[I]==CENTER ? std::get< I >( evaluators[d] ).centerValue( fIdx[I] , cIdx[I] , dd ) : std::get< I >( evaluators[d] ).cornerValue( fIdx[I] , cIdx[I]+off[I] , dd ); _setDValues< _PointD , I+1 >( d , fIdx , cIdx , off , pc , dValues ); }
// _setEvaluators: compile-time recursion over the Dim axes, initializing the
// per-depth evaluators (and, from depth 1, the parent-to-child evaluators).
template< unsigned int I=0 > typename std::enable_if< I==Dim >::type _setEvaluators( unsigned int maxDepth ){ }
template< unsigned int I=0 > typename std::enable_if< I< Dim >::type _setEvaluators( unsigned int maxDepth ) { static const unsigned int FEMSig = UIntPack< FEMSigs ... >::template Get< I >(); for( unsigned int d=0 ; d<=maxDepth ; d++ ) BSplineEvaluationData< FEMSig >:: SetEvaluator( std::template get< I >( evaluators[d] ) , d ); for( unsigned int d=1 ; d<=maxDepth ; d++ ) BSplineEvaluationData< FEMSig >::SetChildEvaluator( std::template get< I >( childEvaluators[d] ) , d-1 ); _setEvaluators< I+1 >( maxDepth ); }
typename FEMIntegrator::template PointEvaluator< UIntPack< FEMSigs ... > , IsotropicUIntPack< Dim , PointD > >* _pointEvaluator;
friend FEMTree;
};
// Derivative values at a node's center, from the fine and coarse solutions
// (declaration continues on the next line).
template< class V , unsigned int _PointD , unsigned int ... FEMSigs , unsigned int PointD > CumulativeDerivativeValues< V , Dim , _PointD > _getCenterValues( const ConstPointSupportKey< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& neighborKey , const FEMTreeNode* node , ConstPointer( V ) solution , ConstPointer( V ) coarseSolution , const _Evaluator< UIntPack< FEMSigs ...
> , PointD >& evaluator , int maxDepth , bool isInterior ) const; template< class V , unsigned int _PointD , unsigned int ... FEMSigs , unsigned int PointD > CumulativeDerivativeValues< V , Dim , _PointD > _getCornerValues( const ConstPointSupportKey< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& neighborKey , const FEMTreeNode* node , int corner , ConstPointer( V ) solution , ConstPointer( V ) coarseSolution , const _Evaluator< UIntPack< FEMSigs ... > , PointD >& evaluator , int maxDepth , bool isInterior ) const; template< class V , unsigned int _PointD , unsigned int ... FEMSigs , unsigned int PointD > CumulativeDerivativeValues< V , Dim , _PointD > _getValues ( const ConstPointSupportKey< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& neighborKey , const FEMTreeNode* node , Point< Real , Dim > p , ConstPointer( V ) solution , ConstPointer( V ) coarseSolution , const _Evaluator< UIntPack< FEMSigs ... > , PointD >& evaluator , int maxDepth ) const; template< class V , unsigned int _PointD , unsigned int ... FEMSigs , unsigned int PointD > CumulativeDerivativeValues< V , Dim , _PointD > _getCornerValues( const ConstCornerSupportKey< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& neighborKey , const FEMTreeNode* node , int corner , ConstPointer( V ) solution , ConstPointer( V ) coarseSolution , const _Evaluator< UIntPack< FEMSigs ... > , PointD >& evaluator , int maxDepth , bool isInterior ) const; template< unsigned int ... SupportSizes > struct CornerLoopData { typedef UIntPack< SupportSizes ... > _SupportSizes; // static const unsigned int supportSizes[] = { SupportSizes ... 
}; static const unsigned int supportSizes[]; unsigned int ccSize[1<<Dim] , pcSize[1<<Dim][1<<Dim]; unsigned int ccIndices[1<<Dim] [ WindowSize< _SupportSizes >::Size ]; unsigned int pcIndices[1<<Dim][1<<Dim][ WindowSize< _SupportSizes >::Size ]; CornerLoopData( void ) { int start[Dim] , end[Dim] , _start[Dim] , _end[Dim]; for( int c=0 ; c<(1<<Dim) ; c++ ) { ccSize[c] = 0; for( int dd=0 ; dd<Dim ; dd++ ) { start[dd] = 0 , end[dd] = supportSizes[dd]; if( (c>>dd) & 1 ) start[dd]++; else end [dd]--; } unsigned int idx[Dim]; WindowLoop< Dim >::Run ( start , end , [&]( int d , int i ){ idx[d] = i; } , [&]( void ){ ccIndices[c][ ccSize[c]++ ] = GetWindowIndex( _SupportSizes() , idx ); } ); for( int _c=0 ; _c<(1<<Dim) ; _c++ ) { pcSize[c][_c] = 0; for( int dd=0 ; dd<Dim ; dd++ ) { if( ( (_c>>dd) & 1 ) != ( (c>>dd) & 1 ) ) _start[dd] = 0 , _end[dd] = supportSizes[dd]; else _start[dd] = start[dd] , _end[dd] = end[dd]; } unsigned int idx[Dim]; WindowLoop< Dim >::Run ( _start , _end , [&]( int d , int i ){ idx[d] = i; } , [&]( void ){ pcIndices[c][_c][ pcSize[c][_c]++ ] = GetWindowIndex( _SupportSizes() , idx ); } ); } } } }; public: template< typename Pack , unsigned int PointD , typename T > struct _MultiThreadedEvaluator{ }; template< unsigned int ... FEMSigs , unsigned int PointD , typename T > struct _MultiThreadedEvaluator< UIntPack< FEMSigs ... > , PointD , T > { typedef UIntPack< FEMSigs ... > FEMSignatures; typedef UIntPack< FEMSignature< FEMSigs >::Degree ... 
> FEMDegrees; const FEMTree* _tree; int _threads; std::vector< ConstPointSupportKey< FEMDegrees > > _pointNeighborKeys; std::vector< ConstCornerSupportKey< FEMDegrees > > _cornerNeighborKeys; _Evaluator< FEMSignatures , PointD > _evaluator; const DenseNodeData< T , FEMSignatures >& _coefficients; DenseNodeData< T , FEMSignatures > _coarseCoefficients; public: _MultiThreadedEvaluator( const FEMTree* tree , const DenseNodeData< T , FEMSignatures >& coefficients , int threads=omp_get_max_threads() ); template< unsigned int _PointD=PointD > CumulativeDerivativeValues< T , Dim , _PointD > values( Point< Real , Dim > p , int thread=0 , const FEMTreeNode* node=NULL ); template< unsigned int _PointD=PointD > CumulativeDerivativeValues< T , Dim , _PointD > centerValues( const FEMTreeNode* node , int thread=0 ); template< unsigned int _PointD=PointD > CumulativeDerivativeValues< T , Dim , _PointD > cornerValues( const FEMTreeNode* node , int corner , int thread=0 ); }; template< typename Pack , unsigned int PointD , typename T=Real > using MultiThreadedEvaluator = _MultiThreadedEvaluator< Pack , PointD , T >; template< unsigned int DensityDegree > struct MultiThreadedWeightEvaluator { const FEMTree* _tree; int _threads; std::vector< ConstPointSupportKey< IsotropicUIntPack< Dim , DensityDegree > > > _neighborKeys; const DensityEstimator< DensityDegree >& _density; public: MultiThreadedWeightEvaluator( const FEMTree* tree , const DensityEstimator< DensityDegree >& density , int threads=omp_get_max_threads() ); Real weight( Point< Real , Dim > p , int thread=0 ); }; static double _MaxMemoryUsage , _LocalMemoryUsage; void _reorderDenseOrSparseNodeData( const int* , size_t ){ ; } template< class Data , unsigned int ... FEMSigs , class ... DenseOrSparseNodeData > void _reorderDenseOrSparseNodeData( const int* map , size_t sz , SparseNodeData< Data , UIntPack< FEMSigs ... > >* sData , DenseOrSparseNodeData* ... 
data ) { if( sData ) sData->_remapIndices( map , (int)sz ); _reorderDenseOrSparseNodeData( map , sz , data ... ); } template< class Data , unsigned int ... FEMSigs , class ... DenseOrSparseNodeData > void _reorderDenseOrSparseNodeData( const int* map , size_t sz , DenseNodeData< Data , UIntPack< FEMSigs ... > >* dData , DenseOrSparseNodeData* ... data ) { if( dData ) dData->_remapIndices( map , sz ); _reorderDenseOrSparseNodeData( map , sz , data ... ); } public: static double MaxMemoryUsage( void ){ return _MaxMemoryUsage; } static double LocalMemoryUsage( void ){ return _LocalMemoryUsage; } static void ResetLocalMemoryUsage( void ){ _LocalMemoryUsage = 0; } static double MemoryUsage( void ); FEMTree( int blockSize ); FEMTree( FILE* fp , int blockSize ); ~FEMTree( void ) { if( _tree ) for( int c=0 ; c<(1<<Dim) ; c++ ) _tree[c].cleanChildren( nodeAllocator ); if( nodeAllocator ) delete nodeAllocator; } void write( FILE* fp ) const; static void WriteParameter( FILE* fp ) { FEMTreeRealType realType; if ( typeid( Real )==typeid( float ) ) realType=FEM_TREE_REAL_FLOAT; else if( typeid( Real )==typeid( double ) ) realType=FEM_TREE_REAL_DOUBLE; else ERROR_OUT( "Unrecognized real type" ); fwrite( &realType , sizeof(FEMTreeRealType) , 1 , fp ); int dim = Dim; fwrite( &dim , sizeof(int) , 1 , fp ); } template< unsigned int LeftRadius , unsigned int RightRadius , class ... DenseOrSparseNodeData > void thicken( FEMTreeNode** nodes , size_t nodeCount , DenseOrSparseNodeData* ... data ); template< unsigned int LeftRadius , unsigned int RightRadius , class IsThickenNode , class ... DenseOrSparseNodeData > void thicken( IsThickenNode F , DenseOrSparseNodeData* ... data ); template< unsigned int Radius , class ... DenseOrSparseNodeData > void thicken( FEMTreeNode** nodes , size_t nodeCount , DenseOrSparseNodeData* ... data ){ thicken< Radius , Radius >( nodes , nodeCount , data ... ); } template< unsigned int Radius , class IsThickenNode , class ... 
DenseOrSparseNodeData > void thicken( IsThickenNode F , DenseOrSparseNodeData* ... data ){ thicken< Radius , Radius >( F , data ... ); } template< unsigned int DensityDegree > typename FEMTree::template DensityEstimator< DensityDegree >* setDensityEstimator( const std::vector< PointSample >& samples , LocalDepth splatDepth , Real samplesPerNode , int coDimension ); template< unsigned int ... NormalSigs , unsigned int DensityDegree , class Data > #if defined(_WIN32) || defined(_WIN64) SparseNodeData< Point< Real , Dim > , UIntPack< NormalSigs ... > > setNormalField( UIntPack< NormalSigs ... > , const std::vector< PointSample >& samples , const std::vector< Data >& normalData , const DensityEstimator< DensityDegree >* density , Real& pointWeightSum , std::function< Real ( Real ) > BiasFunction = []( Real ){ return 0.f; } ); #else // !_WIN32 && !_WIN64 SparseNodeData< Point< Real , Dim > , UIntPack< NormalSigs ... > > setNormalField( UIntPack< NormalSigs ... > , const std::vector< PointSample >& samples , const std::vector< Data >& normalData , const DensityEstimator< DensityDegree >* density , Real& pointWeightSum , std::function< Real ( Real ) > BiasFunction = []( Real ){ return (Real)0; } ); #endif // _WIN32 || _WIN64 template< unsigned int DataSig , bool CreateNodes , unsigned int DensityDegree , class Data > SparseNodeData< Data , IsotropicUIntPack< Dim , DataSig > > setSingleDepthDataField( const std::vector< PointSample >& samples , const std::vector< Data >& sampleData , const DensityEstimator< DensityDegree >* density ); template< unsigned int DataSig , bool CreateNodes , unsigned int DensityDegree , class Data > SparseNodeData< ProjectiveData< Data , Real > , IsotropicUIntPack< Dim , DataSig > > setDataField( const std::vector< PointSample >& samples , std::vector< Data >& sampleData , const DensityEstimator< DensityDegree >* density , bool nearest=false ); template< unsigned int MaxDegree , class HasDataFunctor , class ... 
DenseOrSparseNodeData > void finalizeForMultigrid( LocalDepth fullDepth , const HasDataFunctor F , DenseOrSparseNodeData* ... data ); template< unsigned int ... FEMSigs > DenseNodeData< Real , UIntPack< FEMSigs ... > > initDenseNodeData( UIntPack< FEMSigs ... > ) const; template< class Data , unsigned int ... FEMSigs > DenseNodeData< Data , UIntPack< FEMSigs ... > > initDenseNodeData( UIntPack< FEMSigs ... > ) const; // Add multiple-dimensions -> one-dimension constraints template< typename T , unsigned int ... FEMDegrees , unsigned int ... FEMSigs , unsigned int ... CDegrees , unsigned int ... CSigs , unsigned int CDim > void addFEMConstraints( typename BaseFEMIntegrator::template Constraint< UIntPack< FEMDegrees ... > , UIntPack< CDegrees ... > , CDim >& F , const _SparseOrDenseNodeData< Point< T , CDim > , UIntPack< CSigs ... > >& coefficients , DenseNodeData< T , UIntPack< FEMSigs ... > >& constraints , LocalDepth maxDepth ) const { typedef SparseNodeData< Point< T , CDim > , UIntPack< CSigs ... > > SparseType; typedef DenseNodeData< Point< T , CDim > , UIntPack< CSigs ... > > DenseType; static_assert( sizeof...( FEMDegrees )==Dim && sizeof...( FEMSigs )==Dim && sizeof...( CDegrees )==Dim && sizeof...( CSigs )==Dim , "[ERROR] Dimensions don't match" ); static_assert( UIntPack< FEMDegrees ... >::template Compare< UIntPack< FEMSignature< FEMSigs >::Degree ... > >::Equal , "[ERROR] FEM signature and degrees don't match" ); static_assert( UIntPack< CDegrees ... >::template Compare< UIntPack< FEMSignature< CSigs >::Degree ... > >::Equal , "[ERROR] Constraint signature and degrees don't match" ); if ( typeid(coefficients)==typeid(SparseType) ) return _addFEMConstraints< T >( UIntPack< FEMSigs ... >() , UIntPack< CSigs ... >() , F , static_cast< const SparseType& >( coefficients ) , constraints() , maxDepth ); else if( typeid(coefficients)==typeid( DenseType) ) return _addFEMConstraints< T >( UIntPack< FEMSigs ... >() , UIntPack< CSigs ... 
>() , F , static_cast< const DenseType& >( coefficients ) , constraints() , maxDepth ); else return _addFEMConstraints< T >( UIntPack< FEMSigs ... >() , UIntPack< CSigs ... >() , F , coefficients , constraints() , maxDepth ); } // Add one-dimensions -> one-dimension constraints (with distinct signatures) template< typename T , unsigned int ... FEMDegrees , unsigned int ... FEMSigs , unsigned int ... CDegrees , unsigned int ... CSigs > void addFEMConstraints( typename BaseFEMIntegrator::template Constraint< UIntPack< FEMDegrees ... > , UIntPack< CDegrees ... > , 1 >& F , const _SparseOrDenseNodeData< T , UIntPack< CSigs ... > >& coefficients , DenseNodeData< T , UIntPack< FEMSigs ... > >& constraints , LocalDepth maxDepth ) const { typedef SparseNodeData< T , UIntPack< CSigs ... > > SparseType; typedef DenseNodeData< T , UIntPack< CSigs ... > > DenseType; static_assert( sizeof...( FEMDegrees )==Dim && sizeof...( FEMSigs )==Dim && sizeof...( CDegrees )==Dim && sizeof...( CSigs )==Dim , "[ERROR] Dimensions don't match" ); static_assert( UIntPack< FEMDegrees ... >::template Compare< UIntPack< FEMSignature< FEMSigs >::Degree ... > >::Equal , "[ERROR] FEM signature and degrees don't match" ); static_assert( UIntPack< CDegrees ... >::template Compare< UIntPack< FEMSignature< CSigs >::Degree ... > >::Equal , "[ERROR] Constaint signature and degrees don't match" ); if ( typeid(coefficients)==typeid(SparseType) ) return _addFEMConstraints< T >( UIntPack< FEMSigs ... >() , UIntPack< CSigs ... >() , F , static_cast< const SparseType& >( coefficients ) , constraints() , maxDepth ); else if( typeid(coefficients)==typeid( DenseType) ) return _addFEMConstraints< T >( UIntPack< FEMSigs ... >() , UIntPack< CSigs ... >() , F , static_cast< const DenseType& >( coefficients ) , constraints() , maxDepth ); else return _addFEMConstraints< T >( UIntPack< FEMSigs ... >() , UIntPack< CSigs ... 
>() , F , coefficients , constraints() , maxDepth ); } // Add one-dimensions -> one-dimension constraints (with the same signatures) template< typename T , unsigned int ... FEMDegrees , unsigned int ... FEMSigs > // void addFEMConstraints( typename BaseFEMIntegrator::template System< UIntPack< FEMDegrees ... > >& F , const SparseNodeData< T , UIntPack< FEMSigs ... > >& coefficients , _SparseOrDenseNodeData< T , UIntPack< FEMSigs ... > >& constraints , LocalDepth maxDepth ) const void addFEMConstraints( typename BaseFEMIntegrator::template System< UIntPack< FEMDegrees ... > >& F , const _SparseOrDenseNodeData< T , UIntPack< FEMSigs ... > >& coefficients , DenseNodeData< T , UIntPack< FEMSigs ... > >& constraints , LocalDepth maxDepth ) const { typedef SparseNodeData< T , UIntPack< FEMSigs ... > > SparseType; typedef DenseNodeData< T , UIntPack< FEMSigs ... > > DenseType; static_assert( sizeof...( FEMDegrees )==Dim && sizeof...( FEMSigs )==Dim , "[ERROR] Dimensions don't match" ); static_assert( UIntPack< FEMDegrees ... >::template Compare< UIntPack< FEMSignature< FEMSigs >::Degree ... > >::Equal , "[ERROR] FEM signatures and degrees don't match" ); typename BaseFEMIntegrator::template SystemConstraint< UIntPack< FEMDegrees ... > > _F( F ); if ( typeid(coefficients)==typeid(SparseType) ) return _addFEMConstraints< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const SparseType& >( coefficients ) , constraints() , maxDepth ); else if( typeid(coefficients)==typeid( DenseType) ) return _addFEMConstraints< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const DenseType& >( coefficients ) , constraints() , maxDepth ); else return _addFEMConstraints< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , coefficients , constraints() , maxDepth ); } // Add interpolation constraints template< typename T , unsigned int ... FEMSigs , unsigned int PointD , unsigned int ... 
PointDs > typename std::enable_if< (sizeof...(PointDs)!=0) >::type addInterpolationConstraints( DenseNodeData< T , UIntPack< FEMSigs ... > >& constraints , LocalDepth maxDepth , const InterpolationInfo< T , PointD >& iInfo , const InterpolationInfo< T , PointDs >& ... iInfos ) const { addInterpolationConstraints< T , FEMSigs ... >( constraints , maxDepth , iInfo ); addInterpolationConstraints< T , FEMSigs ... >( constraints , maxDepth , iInfos ... ); } template< typename T , unsigned int ... FEMSigs , unsigned int PointD > void addInterpolationConstraints( DenseNodeData< T , UIntPack< FEMSigs ... > >& constraints , LocalDepth maxDepth , const InterpolationInfo< T , PointD >& interpolationInfo ) const; // Real template< unsigned int ... FEMDegrees1 , unsigned int ... FEMSigs1 , unsigned int ... FEMDegrees2 , unsigned int ... FEMSigs2 > double dot( typename BaseFEMIntegrator::Constraint< UIntPack< FEMDegrees1 ... > , UIntPack< FEMDegrees2 ... > , 1 >& F , const _SparseOrDenseNodeData< Real , UIntPack< FEMSigs1 ... > >& coefficients1 , const _SparseOrDenseNodeData< Real , UIntPack< FEMSigs2 ... > >& coefficients2 ) const { typedef SparseNodeData< Real , UIntPack< FEMSigs1 ... > > SparseType1; typedef DenseNodeData< Real , UIntPack< FEMSigs1 ... > > DenseType1; typedef SparseNodeData< Real , UIntPack< FEMSigs2 ... > > SparseType2; typedef DenseNodeData< Real , UIntPack< FEMSigs2 ... > > DenseType2; static_assert( sizeof...( FEMDegrees1 )==Dim && sizeof...( FEMSigs1 )==Dim && sizeof...( FEMDegrees2 )==Dim && sizeof...( FEMSigs2 )==Dim , "[ERROR] Dimensions don't match" ); static_assert( UIntPack< FEMDegrees1 ... >::template Compare< UIntPack< FEMSignature< FEMSigs1 >::Degree ... > >::Equal , "[ERROR] FEM signature and degrees don't match" ); static_assert( UIntPack< FEMDegrees2 ... >::template Compare< UIntPack< FEMSignature< FEMSigs2 >::Degree ... 
> >::Equal , "[ERROR] FEM signature and degrees don't match" ); if ( typeid(coefficients1)==typeid(SparseType1) && typeid(coefficients2)==typeid(SparseType2) ) return _dot< Real >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , F , static_cast< const SparseType1& >( coefficients1 ) , static_cast< const SparseType2& >( coefficients2 ) , []( Real v , Real w ){ return v*w; } ); else if( typeid(coefficients1)==typeid(SparseType1) && typeid(coefficients2)==typeid( DenseType2) ) return _dot< Real >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , F , static_cast< const SparseType1& >( coefficients1 ) , static_cast< const DenseType2& >( coefficients2 ) , []( Real v , Real w ){ return v*w; } ); else if( typeid(coefficients1)==typeid( DenseType1) && typeid(coefficients2)==typeid( DenseType2) ) return _dot< Real >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , F , static_cast< const DenseType1& >( coefficients1 ) , static_cast< const DenseType2& >( coefficients2 ) , []( Real v , Real w ){ return v*w; } ); else if( typeid(coefficients1)==typeid( DenseType1) && typeid(coefficients2)==typeid(SparseType2) ) return _dot< Real >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , F , static_cast< const DenseType1& >( coefficients1 ) , static_cast< const SparseType2& >( coefficients2 ) , []( Real v , Real w ){ return v*w; } ); else return _dot< Real >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , F , coefficients1 , coefficients2 , []( Real v , Real w ){ return v*w; } ); } template< unsigned int ... FEMDegrees , unsigned int ... FEMSigs > double dot( typename BaseFEMIntegrator::System< UIntPack< FEMDegrees ... > >& F , const _SparseOrDenseNodeData< Real , UIntPack< FEMSigs ... > >& coefficients1 , const _SparseOrDenseNodeData< Real , UIntPack< FEMSigs ... > >& coefficients2 ) const { typedef SparseNodeData< Real , UIntPack< FEMSigs ... > > SparseType; typedef DenseNodeData< Real , UIntPack< FEMSigs ... 
> > DenseType; static_assert( sizeof...( FEMDegrees )==Dim && sizeof...( FEMSigs )==Dim , "[ERROR] Dimensions don't match" ); static_assert( UIntPack< FEMDegrees ... >::template Compare< UIntPack< FEMSignature< FEMSigs >::Degree ... > >::Equal , "[ERROR] FEM signatures and degrees don't match" ); typename BaseFEMIntegrator::template SystemConstraint< UIntPack< FEMDegrees ... > > _F( F ); if ( typeid(coefficients1)==typeid(SparseType) && typeid(coefficients2)==typeid(SparseType) ) return _dot< Real >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const SparseType& >( coefficients1 ) , static_cast< const SparseType& >( coefficients2 ) , []( Real v , Real w ){ return v*w; } ); else if( typeid(coefficients1)==typeid(SparseType) && typeid(coefficients2)==typeid( DenseType) ) return _dot< Real >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const SparseType& >( coefficients1 ) , static_cast< const DenseType& >( coefficients2 ) , []( Real v , Real w ){ return v*w; } ); else if( typeid(coefficients1)==typeid( DenseType) && typeid(coefficients2)==typeid( DenseType) ) return _dot< Real >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const DenseType& >( coefficients1 ) , static_cast< const DenseType& >( coefficients2 ) , []( Real v , Real w ){ return v*w; } ); else if( typeid(coefficients1)==typeid( DenseType) && typeid(coefficients2)==typeid(SparseType) ) return _dot< Real >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const DenseType& >( coefficients1 ) , static_cast< const SparseType& >( coefficients2 ) , []( Real v , Real w ){ return v*w; } ); else return _dot< Real >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , coefficients1 , coefficients2 , []( Real v , Real w ){ return v*w; } ); } template< unsigned int ... FEMDegrees , unsigned int ... FEMSigs > double squareNorm( typename BaseFEMIntegrator::template System< UIntPack< FEMDegrees ... 
> >& F , const _SparseOrDenseNodeData< Real , UIntPack< FEMSigs ... > >& coefficients ) const { typedef SparseNodeData< Real , UIntPack< FEMSigs ... > > SparseType; typedef DenseNodeData< Real , UIntPack< FEMSigs ... > > DenseType; typename BaseFEMIntegrator::template SystemConstraint< UIntPack< FEMDegrees ... > > _F( F ); if ( typeid(coefficients)==typeid(SparseType) ) return _dot< Real >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const SparseType& >( coefficients ) , static_cast< const SparseType& >( coefficients ) , []( Real v , Real w ){ return v*w; } ); else if( typeid(coefficients)==typeid( DenseType) ) return _dot< Real >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const DenseType& >( coefficients ) , static_cast< const DenseType& >( coefficients ) , []( Real v , Real w ){ return v*w; } ); else return _dot< Real >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , coefficients , coefficients , []( Real v , Real w ){ return v*w; } ); } template< unsigned int ... FEMSigs1 , unsigned int ... FEMSigs2 , unsigned int ... PointDs > double interpolationDot( const DenseNodeData< Real , UIntPack< FEMSigs1 ... > >& coefficients1 , const DenseNodeData< Real , UIntPack< FEMSigs2 ... > >& coefficients2 , const InterpolationInfo< Real , PointDs >* ... iInfos ) const { static_assert( sizeof...( FEMSigs1 )==Dim && sizeof...( FEMSigs2 )==Dim , "[ERROR] Dimensions don't match" ); return _inteprolationDot( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , coefficients1 , coefficients2 , []( Real v , Real w ){ return v*w; } , iInfos... ); } template< unsigned int ... FEMSigs , unsigned int ... PointDs > double interpolationSquareNorm( const DenseNodeData< Real , UIntPack< FEMSigs ... > >& coefficients , const InterpolationInfo< Real , PointDs >* ... 
iInfos ) const { static_assert( sizeof...( FEMSigs )==Dim , "[ERROR] Dimensions don't match" ); return _interpolationDot< Real >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , coefficients , coefficients , []( Real v , Real w ){ return v*w; } , iInfos... ); } // Generic template< typename T , typename TDotT , unsigned int ... FEMDegrees1 , unsigned int ... FEMSigs1 , unsigned int ... FEMDegrees2 , unsigned int ... FEMSigs2 > double dot( TDotT Dot , typename BaseFEMIntegrator::Constraint< UIntPack< FEMDegrees1 ... > , UIntPack< FEMDegrees2 ... > , 1 >& F , const _SparseOrDenseNodeData< T , UIntPack< FEMSigs1 ... > >& coefficients1 , const _SparseOrDenseNodeData< T , UIntPack< FEMSigs2 ... > >& coefficients2 ) const { typedef SparseNodeData< T , UIntPack< FEMSigs1 ... > > SparseType1; typedef DenseNodeData< T , UIntPack< FEMSigs1 ... > > DenseType1; typedef SparseNodeData< T , UIntPack< FEMSigs2 ... > > SparseType2; typedef DenseNodeData< T , UIntPack< FEMSigs2 ... > > DenseType2; static_assert( sizeof...( FEMDegrees1 )==Dim && sizeof...( FEMSigs1 )==Dim && sizeof...( FEMDegrees2 )==Dim && sizeof...( FEMSigs2 )==Dim , "[ERROR] Dimensions don't match" ); static_assert( UIntPack< FEMDegrees1 ... >::template Compare< UIntPack< FEMSignature< FEMSigs1 >::Degree ... > >::Equal , "[ERROR] FEM signature and degrees don't match" ); static_assert( UIntPack< FEMDegrees2 ... >::template Compare< UIntPack< FEMSignature< FEMSigs2 >::Degree ... > >::Equal , "[ERROR] FEM signature and degrees don't match" ); if ( typeid(coefficients1)==typeid(SparseType1) && typeid(coefficients2)==typeid(SparseType2) ) return _dot< T >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , F , static_cast< const SparseType1& >( coefficients1 ) , static_cast< const SparseType2& >( coefficients2 ) , Dot ); else if( typeid(coefficients1)==typeid(SparseType1) && typeid(coefficients2)==typeid( DenseType2) ) return _dot< T >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... 
>() , F , static_cast< const SparseType1& >( coefficients1 ) , static_cast< const DenseType2& >( coefficients2 ) , Dot ); else if( typeid(coefficients1)==typeid( DenseType1) && typeid(coefficients2)==typeid( DenseType2) ) return _dot< T >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , F , static_cast< const DenseType1& >( coefficients1 ) , static_cast< const DenseType2& >( coefficients2 ) , Dot ); else if( typeid(coefficients1)==typeid( DenseType1) && typeid(coefficients2)==typeid(SparseType2) ) return _dot< T >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , F , static_cast< const DenseType1& >( coefficients1 ) , static_cast< const SparseType2& >( coefficients2 ) , Dot ); else return _dot< T >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , F , coefficients1 , coefficients2 , Dot ); } template< typename T , typename TDotT , unsigned int ... FEMDegrees , unsigned int ... FEMSigs > double dot( TDotT Dot , typename BaseFEMIntegrator::System< UIntPack< FEMDegrees ... > >& F , const _SparseOrDenseNodeData< T , UIntPack< FEMSigs ... > >& coefficients1 , const _SparseOrDenseNodeData< T , UIntPack< FEMSigs ... > >& coefficients2 ) const { typedef SparseNodeData< T , UIntPack< FEMSigs ... > > SparseType; typedef DenseNodeData< T , UIntPack< FEMSigs ... > > DenseType; static_assert( sizeof...( FEMDegrees )==Dim && sizeof...( FEMSigs )==Dim , "[ERROR] Dimensions don't match" ); static_assert( UIntPack< FEMDegrees ... >::template Compare< UIntPack< FEMSignature< FEMSigs >::Degree ... > >::Equal , "[ERROR] FEM signatures and degrees don't match" ); typename BaseFEMIntegrator::template SystemConstraint< UIntPack< FEMDegrees ... > > _F( F ); if ( typeid(coefficients1)==typeid(SparseType) && typeid(coefficients2)==typeid(SparseType) ) return _dot< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... 
>() , _F , static_cast< const SparseType& >( coefficients1 ) , static_cast< const SparseType& >( coefficients2 ) , Dot ); else if( typeid(coefficients1)==typeid(SparseType) && typeid(coefficients2)==typeid( DenseType) ) return _dot< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const SparseType& >( coefficients1 ) , static_cast< const DenseType& >( coefficients2 ) , Dot ); else if( typeid(coefficients1)==typeid( DenseType) && typeid(coefficients2)==typeid( DenseType) ) return _dot< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const DenseType& >( coefficients1 ) , static_cast< const DenseType& >( coefficients2 ) , Dot ); else if( typeid(coefficients1)==typeid( DenseType) && typeid(coefficients2)==typeid(SparseType) ) return _dot< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const DenseType& >( coefficients1 ) , static_cast< const SparseType& >( coefficients2 ) , Dot ); else return _dot< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , coefficients1 , coefficients2 , Dot ); } template< typename T , typename TDotT , unsigned int ... FEMDegrees , unsigned int ... FEMSigs > double squareNorm( TDotT Dot , typename BaseFEMIntegrator::template System< UIntPack< FEMDegrees ... > >& F , const _SparseOrDenseNodeData< T , UIntPack< FEMSigs ... > >& coefficients ) const { typedef SparseNodeData< T , UIntPack< FEMSigs ... > > SparseType; typedef DenseNodeData< T , UIntPack< FEMSigs ... > > DenseType; typename BaseFEMIntegrator::template SystemConstraint< UIntPack< FEMDegrees ... > > _F( F ); if ( typeid(coefficients)==typeid(SparseType) ) return _dot< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const SparseType& >( coefficients ) , static_cast< const SparseType& >( coefficients ) , Dot ); else if( typeid(coefficients)==typeid( DenseType) ) return _dot< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... 
>() , _F , static_cast< const DenseType& >( coefficients ) , static_cast< const DenseType& >( coefficients ) , Dot ); else return _dot< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , coefficients , coefficients , Dot ); } template< typename T , typename TDotT , unsigned int ... FEMSigs1 , unsigned int ... FEMSigs2 , unsigned int ... PointDs > double interpolationDot( TDotT Dot , const DenseNodeData< T , UIntPack< FEMSigs1 ... > >& coefficients1 , const DenseNodeData< T , UIntPack< FEMSigs2 ... > >& coefficients2 , const InterpolationInfo< T , PointDs >* ... iInfos ) const { static_assert( sizeof...( FEMSigs1 )==Dim && sizeof...( FEMSigs2 )==Dim , "[ERROR] Dimensions don't match" ); return _interpolationDot< T >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , coefficients1 , coefficients2 , Dot , iInfos... ); } template< typename T , typename TDotT , unsigned int ... FEMSigs , unsigned int ... PointDs > double interpolationSquareNorm( TDotT Dot , const DenseNodeData< T , UIntPack< FEMSigs ... > >& coefficients , const InterpolationInfo< T , PointDs >* ... iInfos ) const { static_assert( sizeof...( FEMSigs )==Dim , "[ERROR] Dimensions don't match" ); return _interpolationDot< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , coefficients , coefficients , Dot , iInfos... ); } template< typename T , unsigned int ... PointDs , unsigned int ... FEMSigs > SparseMatrix< Real > systemMatrix( UIntPack< FEMSigs ... > , typename BaseFEMIntegrator::System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , LocalDepth depth , const InterpolationInfo< T , PointDs >* ... interpolationInfo ) const; template< typename T , unsigned int ... PointDs , unsigned int ... FEMSigs > SparseMatrix< Real > prolongedSystemMatrix( UIntPack< FEMSigs ... > , typename BaseFEMIntegrator::System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , LocalDepth highDepth , const InterpolationInfo< T , PointDs >* ... 
interpolationInfo ) const; template< unsigned int ... FEMSigs > SparseMatrix< Real > downSampleMatrix( UIntPack< FEMSigs ... > , LocalDepth highDepth ) const; template< typename T , unsigned int ... PointDs , unsigned int ... FEMSigs > SparseMatrix< Real > fullSystemMatrix( UIntPack< FEMSigs ... > , typename BaseFEMIntegrator::System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , LocalDepth depth , bool nonRefinableOnly , const InterpolationInfo< T , PointDs >* ... interpolationInfo ) const; struct SolverInfo { protected: struct _IterFunction { _IterFunction( int i ) : _i0(i) , _type(0) {} _IterFunction( std::function< int ( int ) > iFunction ) : _i1(iFunction) , _type(1) {} _IterFunction( std::function< int ( bool , int ) > iFunction ) : _i2(iFunction) , _type(2) {} _IterFunction( std::function< int ( int , bool , int ) > iFunction ) : _i3(iFunction) , _type(3) {} _IterFunction& operator = ( int i ){ *this = _IterFunction(i) ; return *this; } _IterFunction& operator = ( std::function< int ( int ) > iFunction ){ *this = _IterFunction(iFunction) ; return *this; } _IterFunction& operator = ( std::function< int ( bool , int ) > iFunction ){ *this = _IterFunction(iFunction) ; return *this; } _IterFunction& operator = ( std::function< int ( int , bool , int ) > iFunction ){ *this = _IterFunction(iFunction) ; return *this; } int operator()( int vCycle , bool restriction , int depth ) const { switch( _type ) { case 0: return _i0; case 1: return _i1( depth ); case 2: return _i2( restriction , depth ); case 3: return _i3( vCycle , restriction , depth ); default: return 0; } } protected: int _i0; std::function< int ( int ) > _i1; std::function< int ( bool , int ) > _i2; std::function< int ( int i3 , bool , int ) > _i3; int _type; }; public: // How to solve bool wCycle; LocalDepth cgDepth; bool cascadic; unsigned int sliceBlockSize; bool useSupportWeights , useProlongationSupportWeights; std::function< Real ( Real , Real ) > sorRestrictionFunction; std::function< 
Real ( Real , Real ) > sorProlongationFunction; _IterFunction iters; int vCycles; double cgAccuracy; int baseDepth , baseVCycles; // What to output bool verbose , showResidual; int showGlobalResidual; SolverInfo( void ) : cgDepth(0) , wCycle(false) , cascadic(true) , iters(1) , vCycles(1) , cgAccuracy(0.) , verbose(false) , showResidual(false) , showGlobalResidual(SHOW_GLOBAL_RESIDUAL_NONE) , sliceBlockSize(1) , sorRestrictionFunction( []( Real , Real ){ return (Real)1; } ) , sorProlongationFunction( []( Real , Real ){ return (Real)1; } ) , useSupportWeights( false ) , useProlongationSupportWeights( false ) , baseDepth(0) , baseVCycles(1) { } }; // Solve the linear system template< unsigned int ... FEMSigs , typename T , typename TDotT , unsigned int ... PointDs > void solveSystem( UIntPack< FEMSigs ... > , typename BaseFEMIntegrator::template System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const DenseNodeData< T , UIntPack< FEMSigs ... > >& constraints , DenseNodeData< T , UIntPack< FEMSigs ... > >& solution , TDotT Dot , LocalDepth maxSolveDepth , const SolverInfo& solverInfo , InterpolationInfo< T , PointDs >* ... iData ) const; template< unsigned int ... FEMSigs , typename T , typename TDotT , unsigned int ... PointDs > DenseNodeData< T , UIntPack< FEMSigs ... > > solveSystem( UIntPack< FEMSigs ... > , typename BaseFEMIntegrator::template System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const DenseNodeData< T , UIntPack< FEMSigs ... > >& constraints , TDotT Dot , LocalDepth maxSolveDepth , const SolverInfo& solverInfo , InterpolationInfo< T , PointDs >* ... iData ) const; template< unsigned int ... FEMSigs , unsigned int ... PointDs > void solveSystem( UIntPack< FEMSigs ... > , typename BaseFEMIntegrator::template System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const DenseNodeData< Real , UIntPack< FEMSigs ... > >& constraints , DenseNodeData< Real , UIntPack< FEMSigs ... 
> >& solution , LocalDepth maxSolveDepth , const SolverInfo& solverInfo , InterpolationInfo< Real , PointDs >* ... iData ) const { return solveSystem< FEMSigs ... , Real >( UIntPack< FEMSigs ... >() , F , constraints , solution , []( Real v , Real w ){ return v*w; } , maxSolveDepth , solverInfo , iData ... ); } template< unsigned int ... FEMSigs , unsigned int ... PointDs > DenseNodeData< Real , UIntPack< FEMSigs ... > > solveSystem( UIntPack< FEMSigs ... > , typename BaseFEMIntegrator::template System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const DenseNodeData< Real , UIntPack< FEMSigs ... > >& constraints , LocalDepth maxSolveDepth , const SolverInfo& solverInfo , InterpolationInfo< Real , PointDs >* ... iData ) const { return solveSystem( UIntPack< FEMSigs ... >() , F , constraints , []( Real v , Real w ){ return v*w; } , maxSolveDepth , solverInfo , iData ... ); } FEMTreeNode& spaceRoot( void ){ return *_spaceRoot; } const FEMTreeNode& tree( void ) const { return *_tree; } std::function< void ( FEMTreeNode& ) > initializer( void ){ return _NodeInitializer( *this ); } size_t leaves( void ) const { return _tree->leaves(); } size_t nodes( void ) const { int count = 0 ; for( const FEMTreeNode* n=_tree->nextNode() ; n ; n=_tree->nextNode( n ) ) if( IsActiveNode< Dim >( n ) ) count++ ; return count; } size_t ghostNodes( void ) const { int count = 0 ; for( const FEMTreeNode* n=_tree->nextNode() ; n ; n=_tree->nextNode( n ) ) if( !IsActiveNode< Dim >( n ) ) count++ ; return count; } inline size_t validSpaceNodes( void ) const { int count = 0 ; for( const FEMTreeNode* n=_tree->nextNode() ; n ; n=_tree->nextNode( n ) ) if( isValidSpaceNode( n ) ) count++ ; return count; } inline size_t validSpaceNodes( LocalDepth d ) const { int count = 0 ; for( const FEMTreeNode* n=_tree->nextNode() ; n ; n=_tree->nextNode( n ) ) if( _localDepth(n)==d && isValidSpaceNode( n ) ) count++ ; return count; } template< unsigned int ... 
FEMSigs > size_t validFEMNodes( UIntPack< FEMSigs ... > ) const { int count = 0 ; for( const FEMTreeNode* n=_tree->nextNode() ; n ; n=_tree->nextNode( n ) ) if( isValidFEMNode( UIntPack< FEMSigs ... >() , n ) ) count++ ; return count; } template< unsigned int ... FEMSigs > size_t validFEMNodes( UIntPack< FEMSigs ... > , LocalDepth d ) const { int count = 0 ; for( const FEMTreeNode* n=_tree->nextNode() ; n ; n=_tree->nextNode( n ) ) if( _localDepth(n)==d && isValidFEMNode( UIntPack< FEMSigs ... >() , n ) ) count++ ; return count; } LocalDepth depth( void ) const { return _spaceRoot->maxDepth(); } void resetNodeIndices( void ){ _nodeCount = 0 ; for( FEMTreeNode* node=_tree->nextNode() ; node ; node=_tree->nextNode( node ) ) _nodeInitializer( *node ) , node->nodeData.flags=0; } std::vector< int > merge( FEMTree* tree ); protected: template< class Real1 , unsigned int _Dim > static bool _IsZero( Point< Real1 , _Dim > p ); template< class Real1 > static bool _IsZero( Real1 p ); template< class SReal , class Data , unsigned int _Dim > static Data _StencilDot( Point< SReal , _Dim > p1 , Point< Data , _Dim > p2 ); template< class SReal , class Data > static Data _StencilDot( Point< SReal , 1 > p1 , Point< Data , 1 > p2 ); template< class SReal , class Data > static Data _StencilDot( SReal p1 , Point< Data , 1 > p2 ); template< class SReal , class Data > static Data _StencilDot( Point< SReal , 1 > p1 , Data p2 ); template< class SReal , class Data > static Data _StencilDot( SReal p1 , Data p2 ); // We need the signatures to test if nodes are valid template< typename T , unsigned int ... FEMSigs , unsigned int ... CSigs , unsigned int ... FEMDegrees , unsigned int ... CDegrees , unsigned int CDim , class Coefficients > void _addFEMConstraints( UIntPack< FEMSigs ... > , UIntPack< CSigs ... > , typename BaseFEMIntegrator::Constraint< UIntPack< FEMDegrees ... > , UIntPack< CDegrees ... 
> , CDim >& F , const Coefficients& coefficients , Pointer( T ) constraints , LocalDepth maxDepth ) const; template< typename T , typename TDotT , unsigned int ... FEMSigs1 , unsigned int ... FEMSigs2 , unsigned int ... Degrees1 , unsigned int ... Degrees2 , class Coefficients1 , class Coefficients2 > double _dot( UIntPack< FEMSigs1 ... > , UIntPack< FEMSigs2 ... > , typename BaseFEMIntegrator::Constraint< UIntPack< Degrees1 ... > , UIntPack< Degrees2 ... > , 1 >& F , const Coefficients1& coefficients1 , const Coefficients2& coefficients2 , TDotT Dot ) const; template< typename T , typename TDotT , unsigned int ... FEMSigs1 , unsigned int ... FEMSigs2 , class Coefficients1 , class Coefficients2 , unsigned int PointD > double _interpolationDot( UIntPack< FEMSigs1 ... > , UIntPack< FEMSigs2 ... > , const Coefficients1& coefficients1 , const Coefficients2& coefficients2 , TDotT Dot , const InterpolationInfo< T , PointD >* iInfo ) const; template< typename T , typename TDotT , unsigned int ... FEMSigs1 , unsigned int ... FEMSigs2 , class Coefficients1 , class Coefficients2 , unsigned int PointD , unsigned int ... PointDs > double _interpolationDot( UIntPack< FEMSigs1 ... > , UIntPack< FEMSigs2 ... > , const Coefficients1& coefficients1 , const Coefficients2& coefficients2 , TDotT Dot , const InterpolationInfo< T , PointD >* iInfo , const InterpolationInfo< T , PointDs >* ... iInfos ) const { return _interpolationDot< T >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , coefficients1 , coefficients2 , Dot , iInfo ) + _interpolationDot< T >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , coefficients1 , coefficients2 , Dot , iInfos... ); } template< typename T , typename TDotT , unsigned int ... FEMSigs1 , unsigned int ... FEMSigs2 , class Coefficients1 , class Coefficients2 > double _interpolationDot( UIntPack< FEMSigs1 ... > , UIntPack< FEMSigs2 ... 
> , const Coefficients1& coefficients1 , const Coefficients2& coefficients2 , TDotT Dot ) const{ return 0; } }; template< unsigned int Dim , class Real > double FEMTree< Dim , Real >::_MaxMemoryUsage = 0; template< unsigned int Dim , class Real > double FEMTree< Dim , Real >::_LocalMemoryUsage = 0; template< unsigned int Dim , class Real , class Vertex > struct IsoSurfaceExtractor { struct IsoStats{}; template< typename Data , unsigned int ... FEMSigs , unsigned int WeightDegree , unsigned int DataSig > static IsoStats Extract ( UIntPack< FEMSigs ... > , UIntPack< WeightDegree > , UIntPack< DataSig > , // Dummy variables for grouping the parameter const FEMTree< Dim , Real >& tree , // The tree over which the system is discretized const typename FEMTree< Dim , Real >::template DensityEstimator< WeightDegree >* densityWeights , // Density weights const SparseNodeData< ProjectiveData< Data , Real > , IsotropicUIntPack< Dim , DataSig > >* data , // Auxiliary spatial data const DenseNodeData< Real , UIntPack< FEMSigs ... 
> >& coefficients , // The coefficients of the function Real isoValue , // The value at which to extract the level-set CoredMeshData< Vertex >& mesh , // The mesh in which to store the output std::function< void ( Vertex& , Point< Real , Dim > , Real , Data ) > SetVertex , // A function for setting the depth and data of a vertex bool nonLinearFit , // Should a linear interpolant be used bool addBarycenter , // Should we triangulate polygons by adding a mid-point bool polygonMesh , // Should we output triangles or polygons bool flipOrientation // Should we flip the orientation ) { // The unspecialized implementation is not supported WARN( "Iso-surface extraction not supported for dimension %d" , Dim ); return IsoStats(); } }; template< unsigned int Dim , class Real > struct FEMTreeInitializer { typedef RegularTreeNode< Dim , FEMTreeNodeData > FEMTreeNode; typedef NodeAndPointSample< Dim , Real > PointSample; template< class Data > struct DerivativeStream { virtual void resolution( unsigned int res[] ) const = 0; virtual bool nextDerivative( unsigned int idx[] , unsigned int& dir , Data& dValue ) = 0; }; // Initialize the tree using a refinement avatar static int Initialize( FEMTreeNode& root , int maxDepth , std::function< bool ( int , int[] ) > Refine , Allocator< FEMTreeNode >* nodeAllocator , std::function< void ( FEMTreeNode& ) > NodeInitializer ); // Initialize the tree using a point stream static int Initialize(FEMTreeNode& root, InputPointStream< Real, Dim >& pointStream, int maxDepth, std::vector< PointSample >& samplePoints, Allocator< FEMTreeNode >* nodeAllocator, std::function< void(FEMTreeNode&) > NodeInitializer); template< class Data > static int Initialize(FEMTreeNode& root, InputPointStreamWithData< Real, Dim, Data >& pointStream, int maxDepth, std::vector< PointSample >& samplePoints, std::vector< Data >& sampleData, bool mergeNodeSamples, Allocator< FEMTreeNode >* nodeAllocator, std::function< void(FEMTreeNode&) > NodeInitializer, std::function< 
Real(const Point< Real, Dim >&, Data&) > ProcessData = [](const Point< Real, Dim >&, Data&) { return (Real)1.; }); // modified by dojo template< class Data > static int Initialize(FEMTreeNode& root, InputPointStreamWithData< Real, Dim, Data >& pointStream, int maxDepth, std::vector< PointSample >& samplePoints, std::vector< Data >& sampleData, bool mergeNodeSamples, Allocator< FEMTreeNode >* nodeAllocator, std::function< void(FEMTreeNode&) > NodeInitializer, std::function< Real(const Point< Real, Dim >&, const int, bool&, Data&) > ProcessData = [](const Point< Real, Dim >&, const int, bool&, Data&) { return (Real)1.; }); // Initialize the tree using simplices static void Initialize( FEMTreeNode& root , const std::vector< Point< Real , Dim > >& vertices , const std::vector< SimplexIndex< Dim-1 > >& simplices , int maxDepth , std::vector< PointSample >& samples , bool mergeNodeSamples , Allocator< FEMTreeNode >* nodeAllocator , std::function< void ( FEMTreeNode& ) > NodeInitializer ); static void Initialize( FEMTreeNode& root , const std::vector< Point< Real , Dim > >& vertices , const std::vector< SimplexIndex< Dim-1 > >& simplices , int maxDepth , std::vector< NodeSimplices< Dim , Real > >& nodeSimplices , Allocator< FEMTreeNode >* nodeAllocator , std::function< void ( FEMTreeNode& ) > NodeInitializer ); template< class Data , class _Data , bool Dual=true > static int Initialize( FEMTreeNode& root , ConstPointer( Data ) values , ConstPointer( int ) labels , int resolution[Dim] , std::vector< NodeSample< Dim , _Data > > derivatives[Dim] , Allocator< FEMTreeNode >* nodeAllocator , std::function< void ( FEMTreeNode& ) > NodeInitializer , std::function< _Data ( const Data& ) > DataConverter = []( const Data& d ){ return (_Data)d; } ); template< bool Dual , class Data > static unsigned int Initialize( FEMTreeNode& root , DerivativeStream< Data >& dStream , std::vector< NodeSample< Dim , Data > > derivatives[Dim] , Allocator< FEMTreeNode >* nodeAllocator , std::function< 
void ( FEMTreeNode& ) > NodeInitializer ); protected: static int _AddSimplex( FEMTreeNode& root , Simplex< Real , Dim , Dim-1 >& s , int maxDepth , std::vector< PointSample >& samples , std::vector< int >* nodeToIndexMap , Allocator< FEMTreeNode >* nodeAllocator , std::function< void ( FEMTreeNode& ) > NodeInitializer ); static int _AddSimplex( FEMTreeNode& root , Simplex< Real , Dim , Dim-1 >& s , int maxDepth , std::vector< NodeSimplices< Dim , Real > >& simplices , std::vector< int >& nodeToIndexMap , Allocator< FEMTreeNode >* nodeAllocator , std::function< void ( FEMTreeNode& ) > NodeInitializer ); static int _AddSimplex( FEMTreeNode* node , Simplex< Real , Dim , Dim-1 >& s , int maxDepth , std::vector< PointSample >& samples , std::vector< int >* nodeToIndexMap , Allocator< FEMTreeNode >* nodeAllocator , std::function< void ( FEMTreeNode& ) > NodeInitializer ); static int _AddSimplex( FEMTreeNode* node , Simplex< Real , Dim , Dim-1 >& s , int maxDepth , std::vector< NodeSimplices< Dim , Real > >& simplices , std::vector< int >& nodeToIndexMap , Allocator< FEMTreeNode >* nodeAllocator , std::function< void ( FEMTreeNode& ) > NodeInitializer ); }; template< unsigned int Dim , class Real > template< unsigned int ... SupportSizes > const unsigned int FEMTree< Dim , Real >::CornerLoopData< SupportSizes ... >::supportSizes[] = { SupportSizes ... }; #include "FEMTree.inl" #include "FEMTree.SortedTreeNodes.inl" #include "FEMTree.WeightedSamples.inl" #include "FEMTree.System.inl" #include "FEMTree.Evaluation.inl" #include "FEMTree.IsoSurface.specialized.inl" #include "FEMTree.Initialize.inl" #endif // FEM_TREE_INCLUDED
Merge.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

/*
 * Demo/benchmark of three merge-sort variants over int arrays:
 *   MSort  - classic sequential top-down recursive merge sort
 *   MSort2 - 4-way recursive split; the four sub-sorts and the two
 *            pairwise merges run as OpenMP sections
 *   MSort3 - bottom-up (iterative) merge sort; each pass is
 *            parallelized across independent run pairs with OpenMP
 * All variants sort in place and need a caller-supplied scratch buffer
 * of at least n ints.  The code also builds and runs correctly without
 * OpenMP (the pragmas are then ignored and execution is sequential).
 */

/* Swap two ints; requires an int temporary named `tt` in scope. */
#define SWAP(a,b) {tt=(a); (a)=(b); (b)=tt;}

/*
 * Merge the sorted runs a1[0..n1) and a2[0..n2) into r[0..n1+n2).
 * `r` must not overlap either input.  On equal keys the element from
 * a2 is emitted first (the `<` comparison) — harmless for plain ints.
 */
void Merge(int *a1, int n1, int *a2, int n2, int *r)
{
    int i1, i2, i;

    for (i1 = 0, i2 = 0, i = 0; i1 < n1 && i2 < n2; )
        if (a1[i1] < a2[i2]) r[i++] = a1[i1++];
        else                 r[i++] = a2[i2++];
    while (i1 < n1) r[i++] = a1[i1++];
    while (i2 < n2) r[i++] = a2[i2++];
}

/*
 * Sequential top-down merge sort of m[0..n).
 * `t` is scratch space of at least n ints.
 */
void MSort(int *m, int n, int *t)
{
    int n1, n2;

    if (n <= 1) return;
    n1 = n / 2;
    n2 = n - n1;
    MSort(m, n1, t);
    MSort(m + n1, n2, t);
    Merge(m, n1, m + n1, n2, t);
    memcpy(m, t, n * sizeof(int));
}

/*
 * Merge sort with a fixed 4-way parallel split: the array is divided
 * into four nearly equal parts, each sorted with MSort in its own
 * OpenMP section (each using a disjoint slice of `t` as scratch),
 * then the two pairwise merges run in parallel and a final merge
 * combines the two halves.  `t` is scratch of at least n ints.
 */
void MSort2(int *m, int n, int *t)
{
    int n1, n2, n3, n4;

    if (n <= 1) return;
    /* Split n into n1+n2+n3+n4 with the same boundaries as two
       successive halvings: [0,n1) [n1,n1+n2) [n1+n2,+n3) [..,+n4). */
    n1 = n / 2;  n3 = n - n1;
    n2 = n1 / 2; n1 = n1 - n2;
    n4 = n3 / 2; n3 = n3 - n4;
#pragma omp parallel sections
    {
#pragma omp section
        { MSort(m, n1, t); }
#pragma omp section
        { MSort(m + n1, n2, t + n1); }
#pragma omp section
        { MSort(m + n1 + n2, n3, t + n1 + n2); }
#pragma omp section
        { MSort(m + n1 + n2 + n3, n4, t + n1 + n2 + n3); }
    }
#pragma omp parallel sections
    {
#pragma omp section
        {
            Merge(m, n1, m + n1, n2, t);
            memcpy(m, t, (n1 + n2) * sizeof(int));
        }
#pragma omp section
        {
            Merge(m + n1 + n2, n3, m + n1 + n2 + n3, n4, t + n1 + n2);
            memcpy(m + n1 + n2, t + n1 + n2, (n3 + n4) * sizeof(int));
        }
    }
    Merge(m, n1 + n2, m + n1 + n2, n3 + n4, t);
    memcpy(m, t, n * sizeof(int));
}

/*
 * Bottom-up merge sort of m[0..n).  Pass k merges adjacent sorted runs
 * of length k; the run pairs of one pass touch disjoint slices of both
 * `m` and `t`, so each pass is parallelized with OpenMP.
 * `t` is scratch space of at least n ints.
 */
void MSort3(int *m, int n, int *t)
{
    int k, k2, i, tt;

    /* First pass (k==1) as a direct compare-and-swap of adjacent
       pairs; `tt` is the SWAP temporary and must be thread-private. */
#pragma omp parallel for private(tt)
    for (i = 0; i < n - 1; i += 2)
        if (m[i] > m[i + 1]) SWAP(m[i], m[i + 1]);

    /* Remaining passes: merge run pairs [i,i+k) and [i+k,i+k+k2),
       where the right-hand run may be short (k2 < k) at the tail. */
    for (k = 2; k < n; k *= 2)
    {
#pragma omp parallel for private(k2)
        for (i = 0; i < n - k; i += 2 * k)
        {
            k2 = k;
            if (n - i - k < k2) k2 = n - i - k;
            Merge(m + i, k, m + i + k, k2, t + i);
            memcpy(m + i, t + i, (k + k2) * sizeof(int));
        }
    }
}

/*
 * Fill an array with pseudo-random ints, sort it with each variant,
 * time the runs (time() has 1-second resolution, so small inputs will
 * typically report 0), and cross-check that all variants produce the
 * same, sorted, result.
 */
int main(void)
{
    int i, *a, *a0, *t, *b, n = 100000;
    time_t t0, t1;

    a0 = (int*)malloc(n * sizeof(int));
    a  = (int*)malloc(n * sizeof(int));
    t  = (int*)malloc(n * sizeof(int));
    b  = (int*)malloc(n * sizeof(int));
    if (!a0 || !a || !t || !b) {        /* fixed: mallocs were unchecked */
        fprintf(stderr, "out of memory\n");
        free(a0); free(a); free(t); free(b);
        return 1;
    }
    for (i = 0; i < n; i++) a0[i] = rand() % n;

    /* Reference run: plain MSort, verified to be sorted. */
    memcpy(b, a0, n * sizeof(int));
    time(&t0); MSort(b, n, t); time(&t1);
    printf("MSort:%d\n", (int)(t1 - t0));
    for (i = 1; i < n; i++) if (b[i] < b[i - 1]) printf ("Err1:i=%d\n", i);

    /* MSort3 must reproduce the reference result exactly. */
    memcpy(a, a0, n * sizeof(int));
    time(&t0); MSort3(a, n, t); time(&t1);
    printf("MSort3:%d\n", (int)(t1 - t0));
    for (i = 0; i < n; i++) if (a[i] != b[i]) printf ("Err2:i=%d\n", i);

    /* MSort2 must reproduce the reference result exactly. */
    memcpy(a, a0, n * sizeof(int));
    time(&t0); MSort2(a, n, t); time(&t1);
    printf("MSort2:%d\n", (int)(t1 - t0));
    for (i = 0; i < n; i++) if (a[i] != b[i]) printf ("Err3:i=%d\n", i);

    free(a0); a0 = NULL;
    free(a);  a  = NULL;
    free(t);  t  = NULL;
    free(b);  b  = NULL;
    printf("done\n");
    return 0;
}
acado_integrator.c
/* * This file was auto-generated using the ACADO Toolkit. * * While ACADO Toolkit is free software released under the terms of * the GNU Lesser General Public License (LGPL), the generated code * as such remains the property of the user who used ACADO Toolkit * to generate this code. In particular, user dependent data of the code * do not inherit the GNU LGPL license. On the other hand, parts of the * generated code that are a direct copy of source code from the * ACADO Toolkit or the software tools it is based on, remain, as derived * work, automatically covered by the LGPL license. * * ACADO Toolkit is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * */ #include "acado_common.h" real_t rk_dim9_swap; /** Column vector of size: 9 */ real_t rk_dim9_bPerm[ 9 ]; /** Column vector of size: 62 */ real_t auxVar[ 62 ]; real_t rk_ttt; /** Row vector of size: 21 */ real_t rk_xxx[ 21 ]; /** Column vector of size: 9 */ real_t rk_kkk[ 9 ]; /** Matrix of size: 9 x 9 (row major format) */ real_t rk_A[ 81 ]; /** Column vector of size: 9 */ real_t rk_b[ 9 ]; /** Row vector of size: 9 */ int rk_dim9_perm[ 9 ]; /** Column vector of size: 9 */ real_t rk_rhsTemp[ 9 ]; /** Row vector of size: 108 */ real_t rk_diffsTemp2[ 108 ]; /** Column vector of size: 9 */ real_t rk_diffK[ 9 ]; /** Matrix of size: 9 x 12 (row major format) */ real_t rk_diffsNew2[ 108 ]; #pragma omp threadprivate( auxVar, rk_ttt, rk_xxx, rk_kkk, rk_diffK, rk_rhsTemp, rk_dim9_perm, rk_A, rk_b, rk_diffsNew2, rk_diffsTemp2, rk_dim9_swap, rk_dim9_bPerm ) void acado_rhs(const real_t* in, real_t* out) { const real_t* xd = in; const real_t* u = in + 9; const real_t* od = in + 12; /* Vector of auxiliary variables; number of elements: 29. 
*/ real_t* a = auxVar; /* Compute intermediate quantities: */ a[0] = (sin(xd[4])); a[1] = (cos(xd[3])); a[2] = (cos(xd[5])); a[3] = (sin(xd[4])); a[4] = (sin(xd[3])); a[5] = (sin(xd[5])); a[6] = (cos(xd[4])); a[7] = (cos(xd[5])); a[8] = (cos(xd[4])); a[9] = (sin(xd[5])); a[10] = (cos(xd[3])); a[11] = (sin(xd[5])); a[12] = (cos(xd[5])); a[13] = (sin(xd[4])); a[14] = (sin(xd[3])); a[15] = (cos(xd[3])); a[16] = (cos(xd[5])); a[17] = (sin(xd[4])); a[18] = (sin(xd[3])); a[19] = (sin(xd[5])); a[20] = (cos(xd[3])); a[21] = (sin(xd[4])); a[22] = (sin(xd[5])); a[23] = (cos(xd[5])); a[24] = (sin(xd[3])); a[25] = (cos(xd[4])); a[26] = (sin(xd[3])); a[27] = (cos(xd[4])); a[28] = (cos(xd[3])); /* Compute outputs: */ out[0] = (((((((((real_t)(0.0000000000000000e+00)-((real_t)(0.0000000000000000e+00)-a[0]))*od[4])*u[2])*xd[2])+((((a[1]*a[2])*a[3])+(a[4]*a[5]))*u[2]))-((((a[6]*a[7])*od[4])*u[2])*xd[0]))-((((a[8]*od[4])*a[9])*u[2])*xd[1]))+od[6]); out[1] = (((((((((real_t)(0.0000000000000000e+00)-((((real_t)(0.0000000000000000e+00)-a[10])*a[11])+((a[12]*a[13])*a[14])))*od[5])*u[2])*xd[0])-(((((a[15]*a[16])+((a[17]*a[18])*a[19]))*od[5])*u[2])*xd[1]))+((((a[20]*a[21])*a[22])-(a[23]*a[24]))*u[2]))-((((a[25]*od[5])*a[26])*u[2])*xd[2]))+od[7]); out[2] = (((real_t)(-9.8065999999999995e+00)+((a[27]*a[28])*u[2]))+od[8]); out[3] = ((((real_t)(0.0000000000000000e+00)-xd[3])+(od[1]*u[0]))/od[0]); out[4] = ((((real_t)(0.0000000000000000e+00)-xd[4])+(od[3]*u[1]))/od[2]); out[5] = (real_t)(0.0000000000000000e+00); out[6] = xd[0]; out[7] = xd[1]; out[8] = xd[2]; } void acado_diffs(const real_t* in, real_t* out) { const real_t* xd = in; const real_t* u = in + 9; const real_t* od = in + 12; /* Vector of auxiliary variables; number of elements: 62. 
*/ real_t* a = auxVar; /* Compute intermediate quantities: */ a[0] = (cos(xd[4])); a[1] = (cos(xd[5])); a[2] = (cos(xd[4])); a[3] = (sin(xd[5])); a[4] = (sin(xd[4])); a[5] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[3]))); a[6] = (cos(xd[5])); a[7] = (sin(xd[4])); a[8] = (cos(xd[3])); a[9] = (sin(xd[5])); a[10] = (cos(xd[4])); a[11] = (cos(xd[3])); a[12] = (cos(xd[4])); a[13] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[4]))); a[14] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[4]))); a[15] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[5]))); a[16] = (sin(xd[3])); a[17] = (cos(xd[5])); a[18] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[5]))); a[19] = (cos(xd[5])); a[20] = (cos(xd[3])); a[21] = (sin(xd[5])); a[22] = (cos(xd[5])); a[23] = (sin(xd[4])); a[24] = (sin(xd[3])); a[25] = (cos(xd[3])); a[26] = (cos(xd[5])); a[27] = (sin(xd[4])); a[28] = (sin(xd[3])); a[29] = (sin(xd[5])); a[30] = (cos(xd[4])); a[31] = (sin(xd[3])); a[32] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[3]))); a[33] = (cos(xd[3])); a[34] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[3]))); a[35] = (cos(xd[3])); a[36] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[3]))); a[37] = (sin(xd[4])); a[38] = (sin(xd[5])); a[39] = (cos(xd[5])); a[40] = (cos(xd[3])); a[41] = (cos(xd[3])); a[42] = (cos(xd[4])); a[43] = (cos(xd[4])); a[44] = (cos(xd[3])); a[45] = (cos(xd[4])); a[46] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[4]))); a[47] = (cos(xd[5])); a[48] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[5]))); a[49] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[5]))); a[50] = (cos(xd[5])); a[51] = (cos(xd[5])); a[52] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[5]))); a[53] = (sin(xd[3])); a[54] = (cos(xd[4])); a[55] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[3]))); a[56] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[4]))); a[57] = (cos(xd[3])); a[58] = ((real_t)(1.0000000000000000e+00)/od[0]); a[59] = ((real_t)(1.0000000000000000e+00)/od[0]); a[60] = ((real_t)(1.0000000000000000e+00)/od[2]); a[61] = 
((real_t)(1.0000000000000000e+00)/od[2]); /* Compute outputs: */ out[0] = ((real_t)(0.0000000000000000e+00)-(((a[0]*a[1])*od[4])*u[2])); out[1] = ((real_t)(0.0000000000000000e+00)-(((a[2]*od[4])*a[3])*u[2])); out[2] = ((((real_t)(0.0000000000000000e+00)-((real_t)(0.0000000000000000e+00)-a[4]))*od[4])*u[2]); out[3] = ((((a[5]*a[6])*a[7])+(a[8]*a[9]))*u[2]); out[4] = ((((((((real_t)(0.0000000000000000e+00)-((real_t)(0.0000000000000000e+00)-a[10]))*od[4])*u[2])*xd[2])+(((a[11]*a[6])*a[12])*u[2]))-((((a[13]*a[1])*od[4])*u[2])*xd[0]))-((((a[14]*od[4])*a[3])*u[2])*xd[1])); out[5] = ((((((a[11]*a[15])*a[7])+(a[16]*a[17]))*u[2])-((((a[0]*a[18])*od[4])*u[2])*xd[0]))-((((a[2]*od[4])*a[19])*u[2])*xd[1])); out[6] = (real_t)(0.0000000000000000e+00); out[7] = (real_t)(0.0000000000000000e+00); out[8] = (real_t)(0.0000000000000000e+00); out[9] = (real_t)(0.0000000000000000e+00); out[10] = (real_t)(0.0000000000000000e+00); out[11] = (((((((real_t)(0.0000000000000000e+00)-((real_t)(0.0000000000000000e+00)-a[4]))*od[4])*xd[2])+(((a[11]*a[6])*a[7])+(a[16]*a[9])))-(((a[0]*a[1])*od[4])*xd[0]))-(((a[2]*od[4])*a[3])*xd[1])); out[12] = ((((real_t)(0.0000000000000000e+00)-((((real_t)(0.0000000000000000e+00)-a[20])*a[21])+((a[22]*a[23])*a[24])))*od[5])*u[2]); out[13] = ((real_t)(0.0000000000000000e+00)-((((a[25]*a[26])+((a[27]*a[28])*a[29]))*od[5])*u[2])); out[14] = ((real_t)(0.0000000000000000e+00)-(((a[30]*od[5])*a[31])*u[2])); out[15] = ((((((((real_t)(0.0000000000000000e+00)-((((real_t)(0.0000000000000000e+00)-a[32])*a[21])+((a[22]*a[23])*a[33])))*od[5])*u[2])*xd[0])-(((((a[34]*a[26])+((a[27]*a[35])*a[29]))*od[5])*u[2])*xd[1]))+((((a[36]*a[37])*a[38])-(a[39]*a[40]))*u[2]))-((((a[30]*od[5])*a[41])*u[2])*xd[2])); out[16] = ((((((((real_t)(0.0000000000000000e+00)-((a[22]*a[42])*a[24]))*od[5])*u[2])*xd[0])-(((((a[43]*a[28])*a[29])*od[5])*u[2])*xd[1]))+(((a[44]*a[45])*a[38])*u[2]))-((((a[46]*od[5])*a[31])*u[2])*xd[2])); out[17] = 
(((((((real_t)(0.0000000000000000e+00)-((((real_t)(0.0000000000000000e+00)-a[20])*a[47])+((a[48]*a[23])*a[24])))*od[5])*u[2])*xd[0])-(((((a[25]*a[49])+((a[27]*a[28])*a[50]))*od[5])*u[2])*xd[1]))+((((a[44]*a[37])*a[51])-(a[52]*a[53]))*u[2])); out[18] = (real_t)(0.0000000000000000e+00); out[19] = (real_t)(0.0000000000000000e+00); out[20] = (real_t)(0.0000000000000000e+00); out[21] = (real_t)(0.0000000000000000e+00); out[22] = (real_t)(0.0000000000000000e+00); out[23] = (((((((real_t)(0.0000000000000000e+00)-((((real_t)(0.0000000000000000e+00)-a[20])*a[21])+((a[22]*a[23])*a[24])))*od[5])*xd[0])-((((a[25]*a[26])+((a[27]*a[28])*a[29]))*od[5])*xd[1]))+(((a[44]*a[37])*a[38])-(a[39]*a[53])))-(((a[30]*od[5])*a[31])*xd[2])); out[24] = (real_t)(0.0000000000000000e+00); out[25] = (real_t)(0.0000000000000000e+00); out[26] = (real_t)(0.0000000000000000e+00); out[27] = ((a[54]*a[55])*u[2]); out[28] = ((a[56]*a[57])*u[2]); out[29] = (real_t)(0.0000000000000000e+00); out[30] = (real_t)(0.0000000000000000e+00); out[31] = (real_t)(0.0000000000000000e+00); out[32] = (real_t)(0.0000000000000000e+00); out[33] = (real_t)(0.0000000000000000e+00); out[34] = (real_t)(0.0000000000000000e+00); out[35] = (a[54]*a[57]); out[36] = (real_t)(0.0000000000000000e+00); out[37] = (real_t)(0.0000000000000000e+00); out[38] = (real_t)(0.0000000000000000e+00); out[39] = (((real_t)(0.0000000000000000e+00)-(real_t)(1.0000000000000000e+00))*a[58]); out[40] = (real_t)(0.0000000000000000e+00); out[41] = (real_t)(0.0000000000000000e+00); out[42] = (real_t)(0.0000000000000000e+00); out[43] = (real_t)(0.0000000000000000e+00); out[44] = (real_t)(0.0000000000000000e+00); out[45] = (od[1]*a[59]); out[46] = (real_t)(0.0000000000000000e+00); out[47] = (real_t)(0.0000000000000000e+00); out[48] = (real_t)(0.0000000000000000e+00); out[49] = (real_t)(0.0000000000000000e+00); out[50] = (real_t)(0.0000000000000000e+00); out[51] = (real_t)(0.0000000000000000e+00); out[52] = 
(((real_t)(0.0000000000000000e+00)-(real_t)(1.0000000000000000e+00))*a[60]); out[53] = (real_t)(0.0000000000000000e+00); out[54] = (real_t)(0.0000000000000000e+00); out[55] = (real_t)(0.0000000000000000e+00); out[56] = (real_t)(0.0000000000000000e+00); out[57] = (real_t)(0.0000000000000000e+00); out[58] = (od[3]*a[61]); out[59] = (real_t)(0.0000000000000000e+00); out[60] = (real_t)(0.0000000000000000e+00); out[61] = (real_t)(0.0000000000000000e+00); out[62] = (real_t)(0.0000000000000000e+00); out[63] = (real_t)(0.0000000000000000e+00); out[64] = (real_t)(0.0000000000000000e+00); out[65] = (real_t)(0.0000000000000000e+00); out[66] = (real_t)(0.0000000000000000e+00); out[67] = (real_t)(0.0000000000000000e+00); out[68] = (real_t)(0.0000000000000000e+00); out[69] = (real_t)(0.0000000000000000e+00); out[70] = (real_t)(0.0000000000000000e+00); out[71] = (real_t)(0.0000000000000000e+00); out[72] = (real_t)(1.0000000000000000e+00); out[73] = (real_t)(0.0000000000000000e+00); out[74] = (real_t)(0.0000000000000000e+00); out[75] = (real_t)(0.0000000000000000e+00); out[76] = (real_t)(0.0000000000000000e+00); out[77] = (real_t)(0.0000000000000000e+00); out[78] = (real_t)(0.0000000000000000e+00); out[79] = (real_t)(0.0000000000000000e+00); out[80] = (real_t)(0.0000000000000000e+00); out[81] = (real_t)(0.0000000000000000e+00); out[82] = (real_t)(0.0000000000000000e+00); out[83] = (real_t)(0.0000000000000000e+00); out[84] = (real_t)(0.0000000000000000e+00); out[85] = (real_t)(1.0000000000000000e+00); out[86] = (real_t)(0.0000000000000000e+00); out[87] = (real_t)(0.0000000000000000e+00); out[88] = (real_t)(0.0000000000000000e+00); out[89] = (real_t)(0.0000000000000000e+00); out[90] = (real_t)(0.0000000000000000e+00); out[91] = (real_t)(0.0000000000000000e+00); out[92] = (real_t)(0.0000000000000000e+00); out[93] = (real_t)(0.0000000000000000e+00); out[94] = (real_t)(0.0000000000000000e+00); out[95] = (real_t)(0.0000000000000000e+00); out[96] = (real_t)(0.0000000000000000e+00); 
/* Tail of an ACADO-generated derivative function whose definition begins
 * before this chunk (left byte-identical): remaining constant entries of a
 * 9x12 Jacobian stored row-major in out[]. */
out[97] = (real_t)(0.0000000000000000e+00);
out[98] = (real_t)(1.0000000000000000e+00);
out[99] = (real_t)(0.0000000000000000e+00);
out[100] = (real_t)(0.0000000000000000e+00);
out[101] = (real_t)(0.0000000000000000e+00);
out[102] = (real_t)(0.0000000000000000e+00);
out[103] = (real_t)(0.0000000000000000e+00);
out[104] = (real_t)(0.0000000000000000e+00);
out[105] = (real_t)(0.0000000000000000e+00);
out[106] = (real_t)(0.0000000000000000e+00);
out[107] = (real_t)(0.0000000000000000e+00);
}

/* Back-substitution for the 9x9 upper-triangular factor produced by
 * acado_solve_dim9_system. A is stored row-major (A[i*9+j]); b is
 * overwritten in place with the solution. Fully unrolled (auto-generated
 * by ACADO; do not hand-edit). */
void acado_solve_dim9_triangular( real_t* const A, real_t* const b )
{
    b[8] = b[8]/A[80];
    b[7] -= + A[71]*b[8];
    b[7] = b[7]/A[70];
    b[6] -= + A[62]*b[8];
    b[6] -= + A[61]*b[7];
    b[6] = b[6]/A[60];
    b[5] -= + A[53]*b[8];
    b[5] -= + A[52]*b[7];
    b[5] -= + A[51]*b[6];
    b[5] = b[5]/A[50];
    b[4] -= + A[44]*b[8];
    b[4] -= + A[43]*b[7];
    b[4] -= + A[42]*b[6];
    b[4] -= + A[41]*b[5];
    b[4] = b[4]/A[40];
    b[3] -= + A[35]*b[8];
    b[3] -= + A[34]*b[7];
    b[3] -= + A[33]*b[6];
    b[3] -= + A[32]*b[5];
    b[3] -= + A[31]*b[4];
    b[3] = b[3]/A[30];
    b[2] -= + A[26]*b[8];
    b[2] -= + A[25]*b[7];
    b[2] -= + A[24]*b[6];
    b[2] -= + A[23]*b[5];
    b[2] -= + A[22]*b[4];
    b[2] -= + A[21]*b[3];
    b[2] = b[2]/A[20];
    b[1] -= + A[17]*b[8];
    b[1] -= + A[16]*b[7];
    b[1] -= + A[15]*b[6];
    b[1] -= + A[14]*b[5];
    b[1] -= + A[13]*b[4];
    b[1] -= + A[12]*b[3];
    b[1] -= + A[11]*b[2];
    b[1] = b[1]/A[10];
    b[0] -= + A[8]*b[8];
    b[0] -= + A[7]*b[7];
    b[0] -= + A[6]*b[6];
    b[0] -= + A[5]*b[5];
    b[0] -= + A[4]*b[4];
    b[0] -= + A[3]*b[3];
    b[0] -= + A[2]*b[2];
    b[0] -= + A[1]*b[1];
    b[0] = b[0]/A[0];
}

/* In-place LU factorization with partial pivoting of the 9x9 matrix A,
 * followed by the solve of A*x = b (solution left in b). rk_perm records
 * the row permutation so acado_solve_dim9_system_reuse can re-solve with
 * the stored factors. Returns |det(A)| as a singularity indicator.
 * NOTE(review): rk_dim9_swap appears to be workspace declared elsewhere in
 * the generated file — confirm against the full source. */
real_t acado_solve_dim9_system( real_t* const A, real_t* const b, int* const rk_perm )
{
    real_t det;

    int i;
    int j;
    int k;
    int indexMax;
    int intSwap;
    real_t valueMax;
    real_t temp;

    for (i = 0; i < 9; ++i)
    {
        rk_perm[i] = i;
    }
    det = 1.0000000000000000e+00;
    for( i=0; i < (8); i++ ) {
        /* find the pivot: the largest |A[j][i]| on or below the diagonal */
        indexMax = i;
        valueMax = fabs(A[i*9+i]);
        for( j=(i+1); j < 9; j++ ) {
            temp = fabs(A[j*9+i]);
            if( temp > valueMax ) {
                indexMax = j;
                valueMax = temp;
            }
        }
        if( indexMax > i ) {
            /* swap rows i and indexMax of A, of b, and of the permutation */
            for (k = 0; k < 9; ++k)
            {
                rk_dim9_swap = A[i*9+k];
                A[i*9+k] = A[indexMax*9+k];
                A[indexMax*9+k] = rk_dim9_swap;
            }
            rk_dim9_swap = b[i];
            b[i] = b[indexMax];
            b[indexMax] = rk_dim9_swap;
            intSwap = rk_perm[i];
            rk_perm[i] = rk_perm[indexMax];
            rk_perm[indexMax] = intSwap;
        }
        det *= A[i*9+i];
        /* eliminate column i below the diagonal; the negated multipliers are
         * stored in the lower triangle of A for reuse in later solves */
        for( j=i+1; j < 9; j++ ) {
            A[j*9+i] = -A[j*9+i]/A[i*9+i];
            for( k=i+1; k < 9; k++ ) {
                A[j*9+k] += A[j*9+i] * A[i*9+k];
            }
            b[j] += A[j*9+i] * b[i];
        }
    }
    det *= A[80];
    det = fabs(det);
    acado_solve_dim9_triangular( A, b );
    return det;
}

/* Re-solve A*x = b using the LU factors and permutation computed by a
 * previous acado_solve_dim9_system call: permute b via rk_perm, apply the
 * stored multipliers (unrolled forward substitution), back-substitute,
 * then copy the solution back into b. rk_dim9_bPerm is workspace declared
 * elsewhere in the generated file. */
void acado_solve_dim9_system_reuse( real_t* const A, real_t* const b, int* const rk_perm )
{
    rk_dim9_bPerm[0] = b[rk_perm[0]];
    rk_dim9_bPerm[1] = b[rk_perm[1]];
    rk_dim9_bPerm[2] = b[rk_perm[2]];
    rk_dim9_bPerm[3] = b[rk_perm[3]];
    rk_dim9_bPerm[4] = b[rk_perm[4]];
    rk_dim9_bPerm[5] = b[rk_perm[5]];
    rk_dim9_bPerm[6] = b[rk_perm[6]];
    rk_dim9_bPerm[7] = b[rk_perm[7]];
    rk_dim9_bPerm[8] = b[rk_perm[8]];
    rk_dim9_bPerm[1] += A[9]*rk_dim9_bPerm[0];

    rk_dim9_bPerm[2] += A[18]*rk_dim9_bPerm[0];
    rk_dim9_bPerm[2] += A[19]*rk_dim9_bPerm[1];

    rk_dim9_bPerm[3] += A[27]*rk_dim9_bPerm[0];
    rk_dim9_bPerm[3] += A[28]*rk_dim9_bPerm[1];
    rk_dim9_bPerm[3] += A[29]*rk_dim9_bPerm[2];

    rk_dim9_bPerm[4] += A[36]*rk_dim9_bPerm[0];
    rk_dim9_bPerm[4] += A[37]*rk_dim9_bPerm[1];
    rk_dim9_bPerm[4] += A[38]*rk_dim9_bPerm[2];
    rk_dim9_bPerm[4] += A[39]*rk_dim9_bPerm[3];

    rk_dim9_bPerm[5] += A[45]*rk_dim9_bPerm[0];
    rk_dim9_bPerm[5] += A[46]*rk_dim9_bPerm[1];
    rk_dim9_bPerm[5] += A[47]*rk_dim9_bPerm[2];
    rk_dim9_bPerm[5] += A[48]*rk_dim9_bPerm[3];
    rk_dim9_bPerm[5] += A[49]*rk_dim9_bPerm[4];

    rk_dim9_bPerm[6] += A[54]*rk_dim9_bPerm[0];
    rk_dim9_bPerm[6] += A[55]*rk_dim9_bPerm[1];
    rk_dim9_bPerm[6] += A[56]*rk_dim9_bPerm[2];
    rk_dim9_bPerm[6] += A[57]*rk_dim9_bPerm[3];
    rk_dim9_bPerm[6] += A[58]*rk_dim9_bPerm[4];
    rk_dim9_bPerm[6] += A[59]*rk_dim9_bPerm[5];

    rk_dim9_bPerm[7] += A[63]*rk_dim9_bPerm[0];
    rk_dim9_bPerm[7] += A[64]*rk_dim9_bPerm[1];
    rk_dim9_bPerm[7] += A[65]*rk_dim9_bPerm[2];
    rk_dim9_bPerm[7] += A[66]*rk_dim9_bPerm[3];
    rk_dim9_bPerm[7] += A[67]*rk_dim9_bPerm[4];
    rk_dim9_bPerm[7] += A[68]*rk_dim9_bPerm[5];
    rk_dim9_bPerm[7] += A[69]*rk_dim9_bPerm[6];

    rk_dim9_bPerm[8] += A[72]*rk_dim9_bPerm[0];
    rk_dim9_bPerm[8] += A[73]*rk_dim9_bPerm[1];
    rk_dim9_bPerm[8] += A[74]*rk_dim9_bPerm[2];
    rk_dim9_bPerm[8] += A[75]*rk_dim9_bPerm[3];
    rk_dim9_bPerm[8] += A[76]*rk_dim9_bPerm[4];
    rk_dim9_bPerm[8] += A[77]*rk_dim9_bPerm[5];
    rk_dim9_bPerm[8] += A[78]*rk_dim9_bPerm[6];
    rk_dim9_bPerm[8] += A[79]*rk_dim9_bPerm[7];

    acado_solve_dim9_triangular( A, rk_dim9_bPerm );

    b[0] = rk_dim9_bPerm[0];
    b[1] = rk_dim9_bPerm[1];
    b[2] = rk_dim9_bPerm[2];
    b[3] = rk_dim9_bPerm[3];
    b[4] = rk_dim9_bPerm[4];
    b[5] = rk_dim9_bPerm[5];
    b[6] = rk_dim9_bPerm[6];
    b[7] = rk_dim9_bPerm[7];
    b[8] = rk_dim9_bPerm[8];
}

/** Column vector of size: 1 */
static const real_t acado_Ah_mat[ 1 ] = 
{ 5.0000000000000003e-02 };


/* Fixed step size:0.1 */
/* One integration step of an implicit Runge-Kutta method (single stage,
 * step size 0.1) with first-order sensitivity propagation. rk_eta holds
 * the 9 states (0..8), the state sensitivities (9..89), the control
 * sensitivities (90..116), and the values copied into rk_xxx[9..20]
 * (117..128). Returns 0 on success, 1 or 2 when the Newton iteration
 * matrix is nearly/actually singular (|det| below 1e-6 / 1e-12). */
int acado_integrate( real_t* const rk_eta, int resetIntegrator )
{
    int error;

    int i;
    int j;
    int k;
    int run;
    int run1;
    int tmp_index1;
    int tmp_index2;

    real_t det;

    rk_ttt = 0.0000000000000000e+00;
    /* copy online data / controls into the evaluation vector */
    rk_xxx[9] = rk_eta[117];
    rk_xxx[10] = rk_eta[118];
    rk_xxx[11] = rk_eta[119];
    rk_xxx[12] = rk_eta[120];
    rk_xxx[13] = rk_eta[121];
    rk_xxx[14] = rk_eta[122];
    rk_xxx[15] = rk_eta[123];
    rk_xxx[16] = rk_eta[124];
    rk_xxx[17] = rk_eta[125];
    rk_xxx[18] = rk_eta[126];
    rk_xxx[19] = rk_eta[127];
    rk_xxx[20] = rk_eta[128];

    for (run = 0; run < 1; ++run)
    {
        if( resetIntegrator ) {
            /* full Newton iteration: rebuild the iteration matrix rk_A from
             * the Jacobian and factorize it */
            for (i = 0; i < 1; ++i)
            {
                for (run1 = 0; run1 < 1; ++run1)
                {
                    for (j = 0; j < 9; ++j)
                    {
                        rk_xxx[j] = rk_eta[j];
                        tmp_index1 = j;
                        rk_xxx[j] += + acado_Ah_mat[run1]*rk_kkk[tmp_index1];
                    }
                    acado_diffs( rk_xxx, &(rk_diffsTemp2[ run1 * 108 ]) );
                    for (j = 0; j < 9; ++j)
                    {
                        tmp_index1 = (run1 * 9) + (j);
                        rk_A[tmp_index1 * 9] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12)];
                        rk_A[tmp_index1 * 9 + 1] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 1)];
                        rk_A[tmp_index1 * 9 + 2] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 2)];
                        rk_A[tmp_index1 * 9 + 3] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 3)];
                        rk_A[tmp_index1 * 9 + 4] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 4)];
                        rk_A[tmp_index1 * 9 + 5] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 5)];
                        rk_A[tmp_index1 * 9 + 6] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 6)];
                        rk_A[tmp_index1 * 9 + 7] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 7)];
                        rk_A[tmp_index1 * 9 + 8] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 8)];
                        if( 0 == run1 ) rk_A[(tmp_index1 * 9) + (j)] -= 1.0000000000000000e+00;
                    }
                    acado_rhs( rk_xxx, rk_rhsTemp );
                    rk_b[run1 * 9] = rk_kkk[run1] - rk_rhsTemp[0];
                    rk_b[run1 * 9 + 1] = rk_kkk[run1 + 1] - rk_rhsTemp[1];
                    rk_b[run1 * 9 + 2] = rk_kkk[run1 + 2] - rk_rhsTemp[2];
                    rk_b[run1 * 9 + 3] = rk_kkk[run1 + 3] - rk_rhsTemp[3];
                    rk_b[run1 * 9 + 4] = rk_kkk[run1 + 4] - rk_rhsTemp[4];
                    rk_b[run1 * 9 + 5] = rk_kkk[run1 + 5] - rk_rhsTemp[5];
                    rk_b[run1 * 9 + 6] = rk_kkk[run1 + 6] - rk_rhsTemp[6];
                    rk_b[run1 * 9 + 7] = rk_kkk[run1 + 7] - rk_rhsTemp[7];
                    rk_b[run1 * 9 + 8] = rk_kkk[run1 + 8] - rk_rhsTemp[8];
                }
                det = acado_solve_dim9_system( rk_A, rk_b, rk_dim9_perm );
                for (j = 0; j < 1; ++j)
                {
                    rk_kkk[j] += rk_b[j * 9];
                    rk_kkk[j + 1] += rk_b[j * 9 + 1];
                    rk_kkk[j + 2] += rk_b[j * 9 + 2];
                    rk_kkk[j + 3] += rk_b[j * 9 + 3];
                    rk_kkk[j + 4] += rk_b[j * 9 + 4];
                    rk_kkk[j + 5] += rk_b[j * 9 + 5];
                    rk_kkk[j + 6] += rk_b[j * 9 + 6];
                    rk_kkk[j + 7] += rk_b[j * 9 + 7];
                    rk_kkk[j + 8] += rk_b[j * 9 + 8];
                }
            }
        }
        /* simplified Newton iterations reusing the stored factorization */
        for (i = 0; i < 2; ++i)
        {
            for (run1 = 0; run1 < 1; ++run1)
            {
                for (j = 0; j < 9; ++j)
                {
                    rk_xxx[j] = rk_eta[j];
                    tmp_index1 = j;
                    rk_xxx[j] += + acado_Ah_mat[run1]*rk_kkk[tmp_index1];
                }
                acado_rhs( rk_xxx, rk_rhsTemp );
                rk_b[run1 * 9] = rk_kkk[run1] - rk_rhsTemp[0];
                rk_b[run1 * 9 + 1] = rk_kkk[run1 + 1] - rk_rhsTemp[1];
                rk_b[run1 * 9 + 2] = rk_kkk[run1 + 2] - rk_rhsTemp[2];
                rk_b[run1 * 9 + 3] = rk_kkk[run1 + 3] - rk_rhsTemp[3];
                rk_b[run1 * 9 + 4] = rk_kkk[run1 + 4] - rk_rhsTemp[4];
                rk_b[run1 * 9 + 5] = rk_kkk[run1 + 5] - rk_rhsTemp[5];
                rk_b[run1 * 9 + 6] = rk_kkk[run1 + 6] - rk_rhsTemp[6];
                rk_b[run1 * 9 + 7] = rk_kkk[run1 + 7] - rk_rhsTemp[7];
                rk_b[run1 * 9 + 8] = rk_kkk[run1 + 8] - rk_rhsTemp[8];
            }
            acado_solve_dim9_system_reuse( rk_A, rk_b, rk_dim9_perm );
            for (j = 0; j < 1; ++j)
            {
                rk_kkk[j] += rk_b[j * 9];
                rk_kkk[j + 1] += rk_b[j * 9 + 1];
                rk_kkk[j + 2] += rk_b[j * 9 + 2];
                rk_kkk[j + 3] += rk_b[j * 9 + 3];
                rk_kkk[j + 4] += rk_b[j * 9 + 4];
                rk_kkk[j + 5] += rk_b[j * 9 + 5];
                rk_kkk[j + 6] += rk_b[j * 9 + 6];
                rk_kkk[j + 7] += rk_b[j * 9 + 7];
                rk_kkk[j + 8] += rk_b[j * 9 + 8];
            }
        }
        /* rebuild the iteration matrix at the converged stage values for the
         * sensitivity solves below */
        for (run1 = 0; run1 < 1; ++run1)
        {
            for (j = 0; j < 9; ++j)
            {
                rk_xxx[j] = rk_eta[j];
                tmp_index1 = j;
                rk_xxx[j] += + acado_Ah_mat[run1]*rk_kkk[tmp_index1];
            }
            acado_diffs( rk_xxx, &(rk_diffsTemp2[ run1 * 108 ]) );
            for (j = 0; j < 9; ++j)
            {
                tmp_index1 = (run1 * 9) + (j);
                rk_A[tmp_index1 * 9] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12)];
                rk_A[tmp_index1 * 9 + 1] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 1)];
                rk_A[tmp_index1 * 9 + 2] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 2)];
                rk_A[tmp_index1 * 9 + 3] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 3)];
                rk_A[tmp_index1 * 9 + 4] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 4)];
                rk_A[tmp_index1 * 9 + 5] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 5)];
                rk_A[tmp_index1 * 9 + 6] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 6)];
                rk_A[tmp_index1 * 9 + 7] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 7)];
                rk_A[tmp_index1 * 9 + 8] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 8)];
                if( 0 == run1 ) rk_A[(tmp_index1 * 9) + (j)] -= 1.0000000000000000e+00;
            }
        }
        /* sensitivities w.r.t. the 9 states: one solve per state direction */
        for (run1 = 0; run1 < 9; ++run1)
        {
            for (i = 0; i < 1; ++i)
            {
                rk_b[i * 9] = - rk_diffsTemp2[(i * 108) + (run1)];
                rk_b[i * 9 + 1] = - rk_diffsTemp2[(i * 108) + (run1 + 12)];
                rk_b[i * 9 + 2] = - rk_diffsTemp2[(i * 108) + (run1 + 24)];
                rk_b[i * 9 + 3] = - rk_diffsTemp2[(i * 108) + (run1 + 36)];
                rk_b[i * 9 + 4] = - rk_diffsTemp2[(i * 108) + (run1 + 48)];
                rk_b[i * 9 + 5] = - rk_diffsTemp2[(i * 108) + (run1 + 60)];
                rk_b[i * 9 + 6] = - rk_diffsTemp2[(i * 108) + (run1 + 72)];
                rk_b[i * 9 + 7] = - rk_diffsTemp2[(i * 108) + (run1 + 84)];
                rk_b[i * 9 + 8] = - rk_diffsTemp2[(i * 108) + (run1 + 96)];
            }
            if( 0 == run1 ) {
                det = acado_solve_dim9_system( rk_A, rk_b, rk_dim9_perm );
            }
             else {
                acado_solve_dim9_system_reuse( rk_A, rk_b, rk_dim9_perm );
            }
            for (i = 0; i < 1; ++i)
            {
                rk_diffK[i] = rk_b[i * 9];
                rk_diffK[i + 1] = rk_b[i * 9 + 1];
                rk_diffK[i + 2] = rk_b[i * 9 + 2];
                rk_diffK[i + 3] = rk_b[i * 9 + 3];
                rk_diffK[i + 4] = rk_b[i * 9 + 4];
                rk_diffK[i + 5] = rk_b[i * 9 + 5];
                rk_diffK[i + 6] = rk_b[i * 9 + 6];
                rk_diffK[i + 7] = rk_b[i * 9 + 7];
                rk_diffK[i + 8] = rk_b[i * 9 + 8];
            }
            for (i = 0; i < 9; ++i)
            {
                /* identity seed plus step-size-weighted stage sensitivity */
                rk_diffsNew2[(i * 12) + (run1)] = (i == run1-0);
                rk_diffsNew2[(i * 12) + (run1)] += + rk_diffK[i]*(real_t)1.0000000000000001e-01;
            }
        }
        /* sensitivities w.r.t. the 3 controls */
        for (run1 = 0; run1 < 3; ++run1)
        {
            for (i = 0; i < 1; ++i)
            {
                for (j = 0; j < 9; ++j)
                {
                    tmp_index1 = (i * 9) + (j);
                    tmp_index2 = (run1) + (j * 12);
                    rk_b[tmp_index1] = - rk_diffsTemp2[(i * 108) + (tmp_index2 + 9)];
                }
            }
            acado_solve_dim9_system_reuse( rk_A, rk_b, rk_dim9_perm );
            for (i = 0; i < 1; ++i)
            {
                rk_diffK[i] = rk_b[i * 9];
                rk_diffK[i + 1] = rk_b[i * 9 + 1];
                rk_diffK[i + 2] = rk_b[i * 9 + 2];
                rk_diffK[i + 3] = rk_b[i * 9 + 3];
                rk_diffK[i + 4] = rk_b[i * 9 + 4];
                rk_diffK[i + 5] = rk_b[i * 9 + 5];
                rk_diffK[i + 6] = rk_b[i * 9 + 6];
                rk_diffK[i + 7] = rk_b[i * 9 + 7];
                rk_diffK[i + 8] = rk_b[i * 9 + 8];
            }
            for (i = 0; i < 9; ++i)
            {
                rk_diffsNew2[(i * 12) + (run1 + 9)] = + rk_diffK[i]*(real_t)1.0000000000000001e-01;
            }
        }
        /* advance the state: eta += h * k (h = 0.1) */
        rk_eta[0] += + rk_kkk[0]*(real_t)1.0000000000000001e-01;
        rk_eta[1] += + rk_kkk[1]*(real_t)1.0000000000000001e-01;
        rk_eta[2] += + rk_kkk[2]*(real_t)1.0000000000000001e-01;
        rk_eta[3] += + rk_kkk[3]*(real_t)1.0000000000000001e-01;
        rk_eta[4] += + rk_kkk[4]*(real_t)1.0000000000000001e-01;
        rk_eta[5] += + rk_kkk[5]*(real_t)1.0000000000000001e-01;
        rk_eta[6] += + rk_kkk[6]*(real_t)1.0000000000000001e-01;
        rk_eta[7] += + rk_kkk[7]*(real_t)1.0000000000000001e-01;
        rk_eta[8] += + rk_kkk[8]*(real_t)1.0000000000000001e-01;
        /* write the propagated sensitivities back into rk_eta */
        for (i = 0; i < 9; ++i)
        {
            for (j = 0; j < 9; ++j)
            {
                tmp_index2 = (j) + (i * 9);
                rk_eta[tmp_index2 + 9] = rk_diffsNew2[(i * 12) + (j)];
            }
            for (j = 0; j < 3; ++j)
            {
                tmp_index2 = (j) + (i * 3);
                rk_eta[tmp_index2 + 90] = rk_diffsNew2[(i * 12) + (j + 9)];
            }
        }
        resetIntegrator = 0;
        rk_ttt += 1.0000000000000000e+00;
    }
    for (i = 0; i < 9; ++i)
    {
    }
    /* classify conditioning of the last factorized iteration matrix */
    if( det < 1e-12 ) {
        error = 2;
    } else if( det < 1e-6 ) {
        error = 1;
    } else {
        error = 0;
    }
    return error;
}
GB_unop__lnot_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__lnot_fp32_fp32)
// op(A') function:  GB (_unop_tran__lnot_fp32_fp32)

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    float aij = Ax [pA] ;           \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ;                 \
    Cx [pC] = !(z != 0) ;           \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__lnot_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // all entries present: apply the op to every position
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = !(z != 0) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = !(z != 0) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__lnot_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__le_bool.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__le_bool
// A.*B function (eWiseMult):       GB_AemultB__le_bool
// A*D function (colscale):         GB_AxD__le_bool
// D*A function (rowscale):         GB_DxB__le_bool
// C+=B function (dense accum):     GB_Cdense_accumB__le_bool
// C+=b function (dense accum):     GB_Cdense_accumb__le_bool
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__le_bool
// C=scalar+B                       GB_bind1st__le_bool
// C=scalar+B'                      GB_bind1st_tran__le_bool
// C=A+scalar                       GB_bind2nd__le_bool
// C=A'+scalar                      GB_bind2nd_tran__le_bool

// C type:   bool
// A type:   bool
// B,b type: bool
// BinaryOp: cij = (aij <= bij)

#define GB_ATYPE \
    bool

#define GB_BTYPE \
    bool

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    bool bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = (x <= y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LE || GxB_NO_BOOL || GxB_NO_LE_BOOL)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__le_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__le_bool
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__le_bool
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type bool
        bool bwork = (*((bool *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__le_bool
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__le_bool
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__le_bool
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__le_bool
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__le_bool
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    bool   x = (*((bool *) x_input)) ;
    bool *Bx = (bool *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        bool bij = Bx [p] ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__le_bool
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    bool *Ax = (bool *) Ax_input ;
    bool   y = (*((bool *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        bool aij = Ax [p] ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)          \
{                                  \
    bool aij = Ax [pA] ;           \
    Cx [pC] = (x <= aij) ;         \
}

GrB_Info GB_bind1st_tran__le_bool
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        bool
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool x = (*((const bool *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        bool
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)          \
{                                  \
    bool aij = Ax [pA] ;           \
    Cx [pC] = (aij <= y) ;         \
}

GrB_Info GB_bind2nd_tran__le_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool y = (*((const bool *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
parallel.c
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
/* serial fallback: without OpenMP there is exactly one thread, id 0 */
#define omp_get_thread_num() 0
#endif

/*
 * OpenMP demo: every thread of the parallel region prints which task it
 * performs. Threads with an id below the threshold given in argv[1] take
 * task 1; the rest take task 2.
 *
 * Usage: program <thread-threshold>
 * Exits with EXIT_FAILURE when the argument is missing.
 */
int main(int argc, char **argv)
{
    int thread;

    if (argc < 2) {
        fprintf(stderr, "\nFalta nºde thread\n");
        /* fixed: exit(-1) produces an implementation-defined exit status
         * (255 on POSIX); EXIT_FAILURE is the portable failure code */
        exit(EXIT_FAILURE);
    }
    thread = atoi(argv[1]);

    #pragma omp parallel
    {
        if (omp_get_thread_num() < thread)
            printf("thread %d realiza la tarea 1\n", omp_get_thread_num());
        else
            printf("thread %d realiza la tarea 2\n", omp_get_thread_num());
    }
    return (0);
}
GB_binop__band_int32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__band_int32)
// A.*B function (eWiseMult):       GB (_AemultB_08__band_int32)
// A.*B function (eWiseMult):       GB (_AemultB_02__band_int32)
// A.*B function (eWiseMult):       GB (_AemultB_04__band_int32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__band_int32)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__band_int32)
// C+=b function (dense accum):     GB (_Cdense_accumb__band_int32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__band_int32)
// C=scalar+B                       GB (_bind1st__band_int32)
// C=scalar+B'                      GB (_bind1st_tran__band_int32)
// C=A+scalar                       GB (_bind2nd__band_int32)
// C=A'+scalar                      GB (_bind2nd_tran__band_int32)

// C type:     int32_t
// A type:     int32_t
// A pattern?  0
// B type:     int32_t
// B pattern?  0

// BinaryOp:   cij = (aij) & (bij)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x) & (y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BAND || GxB_NO_INT32 || GxB_NO_BAND_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__band_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__band_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__band_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__band_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // the alpha/beta scalars are used only by eWiseUnion
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__band_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__band_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__band_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__band_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__band_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x) & (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__band_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij) & (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x) & (aij) ;                     \
}

GrB_Info GB (_bind1st_tran__band_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    /* NOTE(review): this chunk of the file ends here; the remainder of this
     * function continues beyond the visible excerpt and is left untouched. */
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij) & (y) ; \ } GrB_Info GB (_bind2nd_tran__band_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
bitshuffle_core.c
/* * Bitshuffle - Filter for improving compression of typed binary data. * * Author: Kiyoshi Masui <kiyo@physics.ubc.ca> * Website: http://www.github.com/kiyo-masui/bitshuffle * Created: 2014 * * See LICENSE file for details about copyright and rights to use. * */ #include "bitshuffle_core.h" #include "bitshuffle_internals.h" #include <stdio.h> #include <string.h> #if defined(__AVX2__) && defined (__SSE2__) #define USEAVX2 #endif #if defined(__SSE2__) #define USESSE2 #endif #if defined(__ARM_NEON__) || (__ARM_NEON) #define USEARMNEON #endif // Conditional includes for SSE2 and AVX2. #ifdef USEAVX2 #include <immintrin.h> #elif defined USESSE2 #include <emmintrin.h> #elif defined USEARMNEON #include <arm_neon.h> #endif #if defined(_OPENMP) && defined(_MSC_VER) typedef int64_t omp_size_t; #else typedef size_t omp_size_t; #endif // Macros. #define CHECK_MULT_EIGHT(n) if (n % 8) return -80; #define MAX(X,Y) ((X) > (Y) ? (X) : (Y)) /* ---- Functions indicating compile time instruction set. ---- */ int bshuf_using_NEON(void) { #ifdef USEARMNEON return 1; #else return 0; #endif } int bshuf_using_SSE2(void) { #ifdef USESSE2 return 1; #else return 0; #endif } int bshuf_using_AVX2(void) { #ifdef USEAVX2 return 1; #else return 0; #endif } /* ---- Worker code not requiring special instruction sets. ---- * * The following code does not use any x86 specific vectorized instructions * and should compile on any machine * */ /* Transpose 8x8 bit array packed into a single quadword *x*. * *t* is workspace. 
 */
/* In-place transpose of the 8x8 bit matrix packed in the 64-bit word x,
 * via three butterfly swap stages (t is scratch).  Little-endian layout. */
#define TRANS_BIT_8X8(x, t) {                                               \
        t = (x ^ (x >> 7)) & 0x00AA00AA00AA00AALL;                          \
        x = x ^ t ^ (t << 7);                                               \
        t = (x ^ (x >> 14)) & 0x0000CCCC0000CCCCLL;                         \
        x = x ^ t ^ (t << 14);                                              \
        t = (x ^ (x >> 28)) & 0x00000000F0F0F0F0LL;                         \
        x = x ^ t ^ (t << 28);                                              \
    }

/* Transpose 8x8 bit array along the diagonal from upper right
   to lower left (big-endian variant of TRANS_BIT_8X8). */
#define TRANS_BIT_8X8_BE(x, t) {                                            \
        t = (x ^ (x >> 9)) & 0x0055005500550055LL;                          \
        x = x ^ t ^ (t << 9);                                               \
        t = (x ^ (x >> 18)) & 0x0000333300003333LL;                         \
        x = x ^ t ^ (t << 18);                                              \
        t = (x ^ (x >> 36)) & 0x000000000F0F0F0FLL;                         \
        x = x ^ t ^ (t << 36);                                              \
    }

/* Transpose of an array of arbitrarily typed elements: an (lda x ldb)
 * matrix of type_t becomes (ldb x lda).  The main loop works in blocks
 * of 8 rows; the second loop handles the lda%8 remainder rows. */
#define TRANS_ELEM_TYPE(in, out, lda, ldb, type_t) {                        \
        size_t ii, jj, kk;                                                  \
        const type_t* in_type = (const type_t*) in;                         \
        type_t* out_type = (type_t*) out;                                   \
        for(ii = 0; ii + 7 < lda; ii += 8) {                                \
            for(jj = 0; jj < ldb; jj++) {                                   \
                for(kk = 0; kk < 8; kk++) {                                 \
                    out_type[jj*lda + ii + kk] =                            \
                        in_type[ii*ldb + kk * ldb + jj];                    \
                }                                                           \
            }                                                               \
        }                                                                   \
        for(ii = lda - lda % 8; ii < lda; ii ++) {                          \
            for(jj = 0; jj < ldb; jj++) {                                   \
                out_type[jj*lda + ii] = in_type[ii*ldb + jj];               \
            }                                                               \
        }                                                                   \
    }


/* Memory copy with bshuf call signature. For testing and profiling. */
int64_t bshuf_copy(const void* in, void* out, const size_t size,
                   const size_t elem_size) {

    const char* in_b = (const char*) in;
    char* out_b = (char*) out;

    memcpy(out_b, in_b, size * elem_size);
    return size * elem_size;
}


/* Transpose bytes within elements, starting partway through input.
 * Scalar fallback used by the SIMD kernels to finish the tail that does
 * not fill a full vector; *start* must be a multiple of 8 (elements). */
int64_t bshuf_trans_byte_elem_remainder(const void* in, void* out, const size_t size,
         const size_t elem_size, const size_t start) {

    size_t ii, jj, kk;
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;

    CHECK_MULT_EIGHT(start);

    if (size > start) {
        // ii loop separated into 2 loops so the compiler can unroll
        // the inner one.
        for (ii = start; ii + 7 < size; ii += 8) {
            for (jj = 0; jj < elem_size; jj++) {
                for (kk = 0; kk < 8; kk++) {
                    out_b[jj * size + ii + kk] =
                        in_b[ii * elem_size + kk * elem_size + jj];
                }
            }
        }
        for (ii = size - size % 8; ii < size; ii ++) {
            for (jj = 0; jj < elem_size; jj++) {
                out_b[jj * size + ii] = in_b[ii * elem_size + jj];
            }
        }
    }
    return size * elem_size;
}


/* Transpose bytes within elements (scalar path: whole input). */
int64_t bshuf_trans_byte_elem_scal(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    return bshuf_trans_byte_elem_remainder(in, out, size, elem_size, 0);
}


/* Transpose bits within bytes, starting at byte offset *start_byte*.
 * Reads the input as 64-bit words (8 bytes = one 8x8 bit matrix). */
int64_t bshuf_trans_bit_byte_remainder(const void* in, void* out, const size_t size,
         const size_t elem_size, const size_t start_byte) {

    const uint64_t* in_b = (const uint64_t*) in;
    uint8_t* out_b = (uint8_t*) out;

    uint64_t x, t;

    size_t ii, kk;
    size_t nbyte = elem_size * size;
    size_t nbyte_bitrow = nbyte / 8;

    // Runtime endianness probe: inspect the first byte of a 64-bit 1.
    uint64_t e=1;
    const int little_endian = *(uint8_t *) &e == 1;
    // On big-endian hosts the bit rows are written in reverse order:
    // start at row 7 and step backwards.  Negating a size_t wraps
    // modulo 2^N, which makes the additions below walk downward --
    // intentional unsigned arithmetic.
    const size_t bit_row_skip = little_endian ? nbyte_bitrow : -nbyte_bitrow;
    const int64_t bit_row_offset = little_endian ? 0 : 7 * nbyte_bitrow;

    CHECK_MULT_EIGHT(nbyte);
    CHECK_MULT_EIGHT(start_byte);

    for (ii = start_byte / 8; ii < nbyte_bitrow; ii ++) {
        x = in_b[ii];
        if (little_endian) {
            TRANS_BIT_8X8(x, t);
        } else {
            TRANS_BIT_8X8_BE(x, t);
        }
        // Scatter the 8 transposed bytes, one per bit row.
        for (kk = 0; kk < 8; kk ++) {
            out_b[bit_row_offset + kk * bit_row_skip + ii] = x;
            x = x >> 8;
        }
    }
    return size * elem_size;
}


/* Transpose bits within bytes (whole input). */
int64_t bshuf_trans_bit_byte_scal(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    return bshuf_trans_bit_byte_remainder(in, out, size, elem_size, 0);
}


/* General transpose of an array, optimized for large element sizes.
*/ int64_t bshuf_trans_elem(const void* in, void* out, const size_t lda, const size_t ldb, const size_t elem_size) { size_t ii, jj; const char* in_b = (const char*) in; char* out_b = (char*) out; for(ii = 0; ii < lda; ii++) { for(jj = 0; jj < ldb; jj++) { memcpy(&out_b[(jj*lda + ii) * elem_size], &in_b[(ii*ldb + jj) * elem_size], elem_size); } } return lda * ldb * elem_size; } /* Transpose rows of shuffled bits (size / 8 bytes) within groups of 8. */ int64_t bshuf_trans_bitrow_eight(const void* in, void* out, const size_t size, const size_t elem_size) { size_t nbyte_bitrow = size / 8; CHECK_MULT_EIGHT(size); return bshuf_trans_elem(in, out, 8, elem_size, nbyte_bitrow); } /* Transpose bits within elements. */ int64_t bshuf_trans_bit_elem_scal(const void* in, void* out, const size_t size, const size_t elem_size) { int64_t count; void *tmp_buf; CHECK_MULT_EIGHT(size); tmp_buf = malloc(size * elem_size); if (tmp_buf == NULL) return -1; count = bshuf_trans_byte_elem_scal(in, out, size, elem_size); CHECK_ERR_FREE(count, tmp_buf); count = bshuf_trans_bit_byte_scal(out, tmp_buf, size, elem_size); CHECK_ERR_FREE(count, tmp_buf); count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size); free(tmp_buf); return count; } /* For data organized into a row for each bit (8 * elem_size rows), transpose * the bytes. */ int64_t bshuf_trans_byte_bitrow_scal(const void* in, void* out, const size_t size, const size_t elem_size) { size_t ii, jj, kk, nbyte_row; const char *in_b; char *out_b; in_b = (const char*) in; out_b = (char*) out; nbyte_row = size / 8; CHECK_MULT_EIGHT(size); for (jj = 0; jj < elem_size; jj++) { for (ii = 0; ii < nbyte_row; ii++) { for (kk = 0; kk < 8; kk++) { out_b[ii * 8 * elem_size + jj * 8 + kk] = \ in_b[(jj * 8 + kk) * nbyte_row + ii]; } } } return size * elem_size; } /* Shuffle bits within the bytes of eight element blocks. 
*/ int64_t bshuf_shuffle_bit_eightelem_scal(const void* in, void* out, \ const size_t size, const size_t elem_size) { const char *in_b; char *out_b; uint64_t x, t; size_t ii, jj, kk; size_t nbyte, out_index; uint64_t e=1; const int little_endian = *(uint8_t *) &e == 1; const size_t elem_skip = little_endian ? elem_size : -elem_size; const uint64_t elem_offset = little_endian ? 0 : 7 * elem_size; CHECK_MULT_EIGHT(size); in_b = (const char*) in; out_b = (char*) out; nbyte = elem_size * size; for (jj = 0; jj < 8 * elem_size; jj += 8) { for (ii = 0; ii + 8 * elem_size - 1 < nbyte; ii += 8 * elem_size) { x = *((uint64_t*) &in_b[ii + jj]); if (little_endian) { TRANS_BIT_8X8(x, t); } else { TRANS_BIT_8X8_BE(x, t); } for (kk = 0; kk < 8; kk++) { out_index = ii + jj / 8 + elem_offset + kk * elem_skip; *((uint8_t*) &out_b[out_index]) = x; x = x >> 8; } } } return size * elem_size; } /* Untranspose bits within elements. */ int64_t bshuf_untrans_bit_elem_scal(const void* in, void* out, const size_t size, const size_t elem_size) { int64_t count; void *tmp_buf; CHECK_MULT_EIGHT(size); tmp_buf = malloc(size * elem_size); if (tmp_buf == NULL) return -1; count = bshuf_trans_byte_bitrow_scal(in, tmp_buf, size, elem_size); CHECK_ERR_FREE(count, tmp_buf); count = bshuf_shuffle_bit_eightelem_scal(tmp_buf, out, size, elem_size); free(tmp_buf); return count; } /* ---- Worker code that uses Arm NEON ---- * * The following code makes use of the Arm NEON instruction set. * NEON technology is the implementation of the ARM Advanced Single * Instruction Multiple Data (SIMD) extension. * The NEON unit is the component of the processor that executes SIMD instructions. * It is also called the NEON Media Processing Engine (MPE). * */ #ifdef USEARMNEON /* Transpose bytes within elements for 16 bit elements. 
*/ int64_t bshuf_trans_byte_elem_NEON_16(const void* in, void* out, const size_t size) { size_t ii; const char *in_b = (const char*) in; char *out_b = (char*) out; int8x16_t a0, b0, a1, b1; for (ii=0; ii + 15 < size; ii += 16) { a0 = vld1q_s8(in_b + 2*ii + 0*16); b0 = vld1q_s8(in_b + 2*ii + 1*16); a1 = vzip1q_s8(a0, b0); b1 = vzip2q_s8(a0, b0); a0 = vzip1q_s8(a1, b1); b0 = vzip2q_s8(a1, b1); a1 = vzip1q_s8(a0, b0); b1 = vzip2q_s8(a0, b0); a0 = vzip1q_s8(a1, b1); b0 = vzip2q_s8(a1, b1); vst1q_s8(out_b + 0*size + ii, a0); vst1q_s8(out_b + 1*size + ii, b0); } return bshuf_trans_byte_elem_remainder(in, out, size, 2, size - size % 16); } /* Transpose bytes within elements for 32 bit elements. */ int64_t bshuf_trans_byte_elem_NEON_32(const void* in, void* out, const size_t size) { size_t ii; const char *in_b; char *out_b; in_b = (const char*) in; out_b = (char*) out; int8x16_t a0, b0, c0, d0, a1, b1, c1, d1; int64x2_t a2, b2, c2, d2; for (ii=0; ii + 15 < size; ii += 16) { a0 = vld1q_s8(in_b + 4*ii + 0*16); b0 = vld1q_s8(in_b + 4*ii + 1*16); c0 = vld1q_s8(in_b + 4*ii + 2*16); d0 = vld1q_s8(in_b + 4*ii + 3*16); a1 = vzip1q_s8(a0, b0); b1 = vzip2q_s8(a0, b0); c1 = vzip1q_s8(c0, d0); d1 = vzip2q_s8(c0, d0); a0 = vzip1q_s8(a1, b1); b0 = vzip2q_s8(a1, b1); c0 = vzip1q_s8(c1, d1); d0 = vzip2q_s8(c1, d1); a1 = vzip1q_s8(a0, b0); b1 = vzip2q_s8(a0, b0); c1 = vzip1q_s8(c0, d0); d1 = vzip2q_s8(c0, d0); a2 = vzip1q_s64(vreinterpretq_s64_s8(a1), vreinterpretq_s64_s8(c1)); b2 = vzip2q_s64(vreinterpretq_s64_s8(a1), vreinterpretq_s64_s8(c1)); c2 = vzip1q_s64(vreinterpretq_s64_s8(b1), vreinterpretq_s64_s8(d1)); d2 = vzip2q_s64(vreinterpretq_s64_s8(b1), vreinterpretq_s64_s8(d1)); vst1q_s64((int64_t *) (out_b + 0*size + ii), a2); vst1q_s64((int64_t *) (out_b + 1*size + ii), b2); vst1q_s64((int64_t *) (out_b + 2*size + ii), c2); vst1q_s64((int64_t *) (out_b + 3*size + ii), d2); } return bshuf_trans_byte_elem_remainder(in, out, size, 4, size - size % 16); } /* Transpose bytes within elements 
   for 64 bit elements. */
/* NEON byte transpose for 8-byte elements: two 8-bit interleave rounds,
 * a 32-bit round, then a 64-bit round, separating all 8 byte lanes of
 * 16 elements per iteration. */
int64_t bshuf_trans_byte_elem_NEON_64(const void* in, void* out, const size_t size) {

    size_t ii;
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;
    int8x16_t a0, b0, c0, d0, e0, f0, g0, h0;
    int8x16_t a1, b1, c1, d1, e1, f1, g1, h1;

    for (ii=0; ii + 15 < size; ii += 16) {
        a0 = vld1q_s8(in_b + 8*ii + 0*16);
        b0 = vld1q_s8(in_b + 8*ii + 1*16);
        c0 = vld1q_s8(in_b + 8*ii + 2*16);
        d0 = vld1q_s8(in_b + 8*ii + 3*16);
        e0 = vld1q_s8(in_b + 8*ii + 4*16);
        f0 = vld1q_s8(in_b + 8*ii + 5*16);
        g0 = vld1q_s8(in_b + 8*ii + 6*16);
        h0 = vld1q_s8(in_b + 8*ii + 7*16);

        a1 = vzip1q_s8 (a0, b0);
        b1 = vzip2q_s8 (a0, b0);
        c1 = vzip1q_s8 (c0, d0);
        d1 = vzip2q_s8 (c0, d0);
        e1 = vzip1q_s8 (e0, f0);
        f1 = vzip2q_s8 (e0, f0);
        g1 = vzip1q_s8 (g0, h0);
        h1 = vzip2q_s8 (g0, h0);

        a0 = vzip1q_s8 (a1, b1);
        b0 = vzip2q_s8 (a1, b1);
        c0 = vzip1q_s8 (c1, d1);
        d0 = vzip2q_s8 (c1, d1);
        e0 = vzip1q_s8 (e1, f1);
        f0 = vzip2q_s8 (e1, f1);
        g0 = vzip1q_s8 (g1, h1);
        h0 = vzip2q_s8 (g1, h1);

        a1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (c0));
        b1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (c0));
        c1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (b0), vreinterpretq_s32_s8 (d0));
        d1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (b0), vreinterpretq_s32_s8 (d0));
        e1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (g0));
        f1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (g0));
        g1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (f0), vreinterpretq_s32_s8 (h0));
        h1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (f0), vreinterpretq_s32_s8 (h0));

        a0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (a1), vreinterpretq_s64_s8 (e1));
        b0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (a1), vreinterpretq_s64_s8 (e1));
        c0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (b1), vreinterpretq_s64_s8 (f1));
        d0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (b1), vreinterpretq_s64_s8 (f1));
        e0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (c1), vreinterpretq_s64_s8 (g1));
        f0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (c1), vreinterpretq_s64_s8 (g1));
        g0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (d1), vreinterpretq_s64_s8 (h1));
        h0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (d1), vreinterpretq_s64_s8 (h1));

        vst1q_s8(out_b + 0*size + ii, a0);
        vst1q_s8(out_b + 1*size + ii, b0);
        vst1q_s8(out_b + 2*size + ii, c0);
        vst1q_s8(out_b + 3*size + ii, d0);
        vst1q_s8(out_b + 4*size + ii, e0);
        vst1q_s8(out_b + 5*size + ii, f0);
        vst1q_s8(out_b + 6*size + ii, g0);
        vst1q_s8(out_b + 7*size + ii, h0);
    }

    return bshuf_trans_byte_elem_remainder(in, out, size, 8,
            size - size % 16);
}


/* Transpose bytes within elements using best NEON algorithm available. */
int64_t bshuf_trans_byte_elem_NEON(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    int64_t count;

    // Trivial cases: power of 2 bytes.
    switch (elem_size) {
        case 1:
            count = bshuf_copy(in, out, size, elem_size);
            return count;
        case 2:
            count = bshuf_trans_byte_elem_NEON_16(in, out, size);
            return count;
        case 4:
            count = bshuf_trans_byte_elem_NEON_32(in, out, size);
            return count;
        case 8:
            count = bshuf_trans_byte_elem_NEON_64(in, out, size);
            return count;
    }

    // Worst case: odd number of bytes. Turns out that this is faster for
    // (odd * 2) byte elements as well (hence % 4).
    if (elem_size % 4) {
        count = bshuf_trans_byte_elem_scal(in, out, size, elem_size);
        return count;
    }

    // Multiple of power of 2: transpose hierarchically.
    {
        size_t nchunk_elem;
        void* tmp_buf = malloc(size * elem_size);
        if (tmp_buf == NULL) return -1;

        if ((elem_size % 8) == 0) {
            nchunk_elem = elem_size / 8;
            // Split each element into 8-byte chunks, transpose those with
            // the vector kernel, then regroup the chunk rows.
            TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int64_t);
            count = bshuf_trans_byte_elem_NEON_64(out, tmp_buf,
                    size * nchunk_elem);
            bshuf_trans_elem(tmp_buf, out, 8, nchunk_elem, size);
        } else if ((elem_size % 4) == 0) {
            nchunk_elem = elem_size / 4;
            TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int32_t);
            count = bshuf_trans_byte_elem_NEON_32(out, tmp_buf,
                    size * nchunk_elem);
            bshuf_trans_elem(tmp_buf, out, 4, nchunk_elem, size);
        } else {
            // Not used since scalar algorithm is faster.
            nchunk_elem = elem_size / 2;
            TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int16_t);
            count = bshuf_trans_byte_elem_NEON_16(out, tmp_buf,
                    size * nchunk_elem);
            bshuf_trans_elem(tmp_buf, out, 2, nchunk_elem, size);
        }

        free(tmp_buf);
        return count;
    }
}


/* Creates a mask made up of the most significant
 * bit of each byte of 'input' (NEON has no direct
 * equivalent of SSE2's _mm_movemask_epi8). */
int32_t move_byte_mask_neon(uint8x16_t input) {

    return (  ((input[0] & 0x80) >> 7)          | (((input[1] & 0x80) >> 7) << 1)   | (((input[2] & 0x80) >> 7) << 2)   | (((input[3] & 0x80) >> 7) << 3)
            | (((input[4] & 0x80) >> 7) << 4)   | (((input[5] & 0x80) >> 7) << 5)   | (((input[6] & 0x80) >> 7) << 6)   | (((input[7] & 0x80) >> 7) << 7)
            | (((input[8] & 0x80) >> 7) << 8)   | (((input[9] & 0x80) >> 7) << 9)   | (((input[10] & 0x80) >> 7) << 10) | (((input[11] & 0x80) >> 7) << 11)
            | (((input[12] & 0x80) >> 7) << 12) | (((input[13] & 0x80) >> 7) << 13) | (((input[14] & 0x80) >> 7) << 14) | (((input[15] & 0x80) >> 7) << 15)
           );
}

/* Transpose bits within bytes.
*/ int64_t bshuf_trans_bit_byte_NEON(const void* in, void* out, const size_t size, const size_t elem_size) { size_t ii, kk; const char* in_b = (const char*) in; char* out_b = (char*) out; uint16_t* out_ui16; int64_t count; size_t nbyte = elem_size * size; CHECK_MULT_EIGHT(nbyte); int16x8_t xmm; int32_t bt; for (ii = 0; ii + 15 < nbyte; ii += 16) { xmm = vld1q_s16((int16_t *) (in_b + ii)); for (kk = 0; kk < 8; kk++) { bt = move_byte_mask_neon((uint8x16_t) xmm); xmm = vshlq_n_s16(xmm, 1); out_ui16 = (uint16_t*) &out_b[((7 - kk) * nbyte + ii) / 8]; *out_ui16 = bt; } } count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size, nbyte - nbyte % 16); return count; } /* Transpose bits within elements. */ int64_t bshuf_trans_bit_elem_NEON(const void* in, void* out, const size_t size, const size_t elem_size) { int64_t count; CHECK_MULT_EIGHT(size); void* tmp_buf = malloc(size * elem_size); if (tmp_buf == NULL) return -1; count = bshuf_trans_byte_elem_NEON(in, out, size, elem_size); CHECK_ERR_FREE(count, tmp_buf); count = bshuf_trans_bit_byte_NEON(out, tmp_buf, size, elem_size); CHECK_ERR_FREE(count, tmp_buf); count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size); free(tmp_buf); return count; } /* For data organized into a row for each bit (8 * elem_size rows), transpose * the bytes. 
 */
/* NEON version: processes 8 bit-rows x 16 columns per iteration with an
 * 8/16/32-bit interleave network, storing the 8-byte column groups with
 * 64-bit lane stores; scalar loop handles the nbyte_row%16 columns. */
int64_t bshuf_trans_byte_bitrow_NEON(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    size_t ii, jj;
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;

    CHECK_MULT_EIGHT(size);

    size_t nrows = 8 * elem_size;
    size_t nbyte_row = size / 8;

    int8x16_t a0, b0, c0, d0, e0, f0, g0, h0;
    int8x16_t a1, b1, c1, d1, e1, f1, g1, h1;
    int64x1_t *as, *bs, *cs, *ds, *es, *fs, *gs, *hs;

    for (ii = 0; ii + 7 < nrows; ii += 8) {
        for (jj = 0; jj + 15 < nbyte_row; jj += 16) {
            a0 = vld1q_s8(in_b + (ii + 0)*nbyte_row + jj);
            b0 = vld1q_s8(in_b + (ii + 1)*nbyte_row + jj);
            c0 = vld1q_s8(in_b + (ii + 2)*nbyte_row + jj);
            d0 = vld1q_s8(in_b + (ii + 3)*nbyte_row + jj);
            e0 = vld1q_s8(in_b + (ii + 4)*nbyte_row + jj);
            f0 = vld1q_s8(in_b + (ii + 5)*nbyte_row + jj);
            g0 = vld1q_s8(in_b + (ii + 6)*nbyte_row + jj);
            h0 = vld1q_s8(in_b + (ii + 7)*nbyte_row + jj);

            a1 = vzip1q_s8(a0, b0);
            b1 = vzip1q_s8(c0, d0);
            c1 = vzip1q_s8(e0, f0);
            d1 = vzip1q_s8(g0, h0);
            e1 = vzip2q_s8(a0, b0);
            f1 = vzip2q_s8(c0, d0);
            g1 = vzip2q_s8(e0, f0);
            h1 = vzip2q_s8(g0, h0);

            a0 = (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (a1), vreinterpretq_s16_s8 (b1));
            b0 = (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (c1), vreinterpretq_s16_s8 (d1));
            c0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (a1), vreinterpretq_s16_s8 (b1));
            d0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (c1), vreinterpretq_s16_s8 (d1));
            e0 = (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (e1), vreinterpretq_s16_s8 (f1));
            f0 = (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (g1), vreinterpretq_s16_s8 (h1));
            g0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (e1), vreinterpretq_s16_s8 (f1));
            h0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (g1), vreinterpretq_s16_s8 (h1));

            a1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (b0));
            b1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (b0));
            c1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (c0), vreinterpretq_s32_s8 (d0));
            d1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (c0), vreinterpretq_s32_s8 (d0));
            e1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (f0));
            f1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (f0));
            g1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (g0), vreinterpretq_s32_s8 (h0));
            h1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (g0), vreinterpretq_s32_s8 (h0));

            // Reinterpret each 128-bit register as two 64-bit halves so
            // the stores below can address them individually.
            as = (int64x1_t *) &a1;
            bs = (int64x1_t *) &b1;
            cs = (int64x1_t *) &c1;
            ds = (int64x1_t *) &d1;
            es = (int64x1_t *) &e1;
            fs = (int64x1_t *) &f1;
            gs = (int64x1_t *) &g1;
            hs = (int64x1_t *) &h1;

            vst1_s64((int64_t *)(out_b + (jj + 0) * nrows + ii), *as);
            vst1_s64((int64_t *)(out_b + (jj + 1) * nrows + ii), *(as + 1));
            vst1_s64((int64_t *)(out_b + (jj + 2) * nrows + ii), *bs);
            vst1_s64((int64_t *)(out_b + (jj + 3) * nrows + ii), *(bs + 1));
            vst1_s64((int64_t *)(out_b + (jj + 4) * nrows + ii), *cs);
            vst1_s64((int64_t *)(out_b + (jj + 5) * nrows + ii), *(cs + 1));
            vst1_s64((int64_t *)(out_b + (jj + 6) * nrows + ii), *ds);
            vst1_s64((int64_t *)(out_b + (jj + 7) * nrows + ii), *(ds + 1));
            vst1_s64((int64_t *)(out_b + (jj + 8) * nrows + ii), *es);
            vst1_s64((int64_t *)(out_b + (jj + 9) * nrows + ii), *(es + 1));
            vst1_s64((int64_t *)(out_b + (jj + 10) * nrows + ii), *fs);
            vst1_s64((int64_t *)(out_b + (jj + 11) * nrows + ii), *(fs + 1));
            vst1_s64((int64_t *)(out_b + (jj + 12) * nrows + ii), *gs);
            vst1_s64((int64_t *)(out_b + (jj + 13) * nrows + ii), *(gs + 1));
            vst1_s64((int64_t *)(out_b + (jj + 14) * nrows + ii), *hs);
            vst1_s64((int64_t *)(out_b + (jj + 15) * nrows + ii), *(hs + 1));
        }
        // Scalar tail for the remaining nbyte_row % 16 columns.
        for (jj = nbyte_row - nbyte_row % 16; jj < nbyte_row; jj ++) {
            out_b[jj * nrows + ii + 0] = in_b[(ii + 0)*nbyte_row + jj];
            out_b[jj * nrows + ii + 1] = in_b[(ii + 1)*nbyte_row + jj];
            out_b[jj * nrows + ii + 2] = in_b[(ii + 2)*nbyte_row + jj];
            out_b[jj * nrows + ii + 3] = in_b[(ii + 3)*nbyte_row + jj];
            out_b[jj * nrows + ii + 4] = in_b[(ii + 4)*nbyte_row + jj];
            out_b[jj * nrows + ii + 5] = in_b[(ii + 5)*nbyte_row + jj];
            out_b[jj * nrows + ii + 6] = in_b[(ii + 6)*nbyte_row + jj];
            out_b[jj * nrows + ii + 7] = in_b[(ii + 7)*nbyte_row + jj];
        }
    }
    return size * elem_size;
}


/* Shuffle bits within the bytes of eight element blocks. */
/* NEON inverse bit shuffle; falls back to the scalar version for odd
 * element sizes, since the kernel writes 16-bit masks. */
int64_t bshuf_shuffle_bit_eightelem_NEON(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    CHECK_MULT_EIGHT(size);

    // With a bit of care, this could be written such that it is
    // in_buf = out_buf safe.
    const char* in_b = (const char*) in;
    uint16_t* out_ui16 = (uint16_t*) out;

    size_t ii, jj, kk;
    size_t nbyte = elem_size * size;

    int16x8_t xmm;
    int32_t bt;

    if (elem_size % 2) {
        bshuf_shuffle_bit_eightelem_scal(in, out, size, elem_size);
    } else {
        for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
                ii += 8 * elem_size) {
            for (jj = 0; jj + 15 < 8 * elem_size; jj += 16) {
                xmm = vld1q_s16((int16_t *) &in_b[ii + jj]);
                for (kk = 0; kk < 8; kk++) {
                    bt = move_byte_mask_neon((uint8x16_t) xmm);
                    xmm = vshlq_n_s16(xmm, 1);
                    size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
                    out_ui16[ind / 2] = bt;
                }
            }
        }
    }
    return size * elem_size;
}


/* Untranspose bits within elements.
*/ int64_t bshuf_untrans_bit_elem_NEON(const void* in, void* out, const size_t size, const size_t elem_size) { int64_t count; CHECK_MULT_EIGHT(size); void* tmp_buf = malloc(size * elem_size); if (tmp_buf == NULL) return -1; count = bshuf_trans_byte_bitrow_NEON(in, tmp_buf, size, elem_size); CHECK_ERR_FREE(count, tmp_buf); count = bshuf_shuffle_bit_eightelem_NEON(tmp_buf, out, size, elem_size); free(tmp_buf); return count; } #else // #ifdef USEARMNEON int64_t bshuf_untrans_bit_elem_NEON(const void* in, void* out, const size_t size, const size_t elem_size) { return -13; } int64_t bshuf_trans_bit_elem_NEON(const void* in, void* out, const size_t size, const size_t elem_size) { return -13; } int64_t bshuf_trans_byte_bitrow_NEON(const void* in, void* out, const size_t size, const size_t elem_size) { return -13; } int64_t bshuf_trans_bit_byte_NEON(const void* in, void* out, const size_t size, const size_t elem_size) { return -13; } int64_t bshuf_trans_byte_elem_NEON(const void* in, void* out, const size_t size, const size_t elem_size) { return -13; } int64_t bshuf_trans_byte_elem_NEON_64(const void* in, void* out, const size_t size) { return -13; } int64_t bshuf_trans_byte_elem_NEON_32(const void* in, void* out, const size_t size) { return -13; } int64_t bshuf_trans_byte_elem_NEON_16(const void* in, void* out, const size_t size) { return -13; } int64_t bshuf_shuffle_bit_eightelem_NEON(const void* in, void* out, const size_t size, const size_t elem_size) { return -13; } #endif /* ---- Worker code that uses SSE2 ---- * * The following code makes use of the SSE2 instruction set and specialized * 16 byte registers. The SSE2 instructions are present on modern x86 * processors. The first Intel processor microarchitecture supporting SSE2 was * Pentium 4 (2000). * */ #ifdef USESSE2 /* Transpose bytes within elements for 16 bit elements. 
 */
/* SSE2 byte transpose for 2-byte elements: four unpack rounds on two
 * 16-byte registers separate low and high bytes of 16 elements per
 * iteration; the scalar remainder handles the last size%16 elements. */
int64_t bshuf_trans_byte_elem_SSE_16(const void* in, void* out, const size_t size) {

    size_t ii;
    const char *in_b = (const char*) in;
    char *out_b = (char*) out;
    __m128i a0, b0, a1, b1;

    for (ii=0; ii + 15 < size; ii += 16) {
        a0 = _mm_loadu_si128((__m128i *) &in_b[2*ii + 0*16]);
        b0 = _mm_loadu_si128((__m128i *) &in_b[2*ii + 1*16]);

        a1 = _mm_unpacklo_epi8(a0, b0);
        b1 = _mm_unpackhi_epi8(a0, b0);

        a0 = _mm_unpacklo_epi8(a1, b1);
        b0 = _mm_unpackhi_epi8(a1, b1);

        a1 = _mm_unpacklo_epi8(a0, b0);
        b1 = _mm_unpackhi_epi8(a0, b0);

        a0 = _mm_unpacklo_epi8(a1, b1);
        b0 = _mm_unpackhi_epi8(a1, b1);

        _mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
        _mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
    }

    return bshuf_trans_byte_elem_remainder(in, out, size, 2,
            size - size % 16);
}


/* Transpose bytes within elements for 32 bit elements. */
/* SSE2 byte transpose for 4-byte elements: three 8-bit unpack rounds then
 * one 64-bit unpack gather each byte lane of 16 elements per iteration. */
int64_t bshuf_trans_byte_elem_SSE_32(const void* in, void* out, const size_t size) {

    size_t ii;
    const char *in_b;
    char *out_b;
    in_b = (const char*) in;
    out_b = (char*) out;
    __m128i a0, b0, c0, d0, a1, b1, c1, d1;

    for (ii=0; ii + 15 < size; ii += 16) {
        a0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 0*16]);
        b0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 1*16]);
        c0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 2*16]);
        d0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 3*16]);

        a1 = _mm_unpacklo_epi8(a0, b0);
        b1 = _mm_unpackhi_epi8(a0, b0);
        c1 = _mm_unpacklo_epi8(c0, d0);
        d1 = _mm_unpackhi_epi8(c0, d0);

        a0 = _mm_unpacklo_epi8(a1, b1);
        b0 = _mm_unpackhi_epi8(a1, b1);
        c0 = _mm_unpacklo_epi8(c1, d1);
        d0 = _mm_unpackhi_epi8(c1, d1);

        a1 = _mm_unpacklo_epi8(a0, b0);
        b1 = _mm_unpackhi_epi8(a0, b0);
        c1 = _mm_unpacklo_epi8(c0, d0);
        d1 = _mm_unpackhi_epi8(c0, d0);

        a0 = _mm_unpacklo_epi64(a1, c1);
        b0 = _mm_unpackhi_epi64(a1, c1);
        c0 = _mm_unpacklo_epi64(b1, d1);
        d0 = _mm_unpackhi_epi64(b1, d1);

        _mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
        _mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
        _mm_storeu_si128((__m128i *) &out_b[2*size + ii], c0);
        _mm_storeu_si128((__m128i *) &out_b[3*size + ii], d0);
    }

    return bshuf_trans_byte_elem_remainder(in, out, size, 4,
            size - size % 16);
}


/* Transpose bytes within elements for 64 bit elements. */
/* SSE2 byte transpose for 8-byte elements: two 8-bit unpack rounds, a
 * 32-bit round, then a 64-bit round, separating all 8 byte lanes. */
int64_t bshuf_trans_byte_elem_SSE_64(const void* in, void* out, const size_t size) {

    size_t ii;
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;
    __m128i a0, b0, c0, d0, e0, f0, g0, h0;
    __m128i a1, b1, c1, d1, e1, f1, g1, h1;

    for (ii=0; ii + 15 < size; ii += 16) {
        a0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 0*16]);
        b0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 1*16]);
        c0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 2*16]);
        d0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 3*16]);
        e0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 4*16]);
        f0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 5*16]);
        g0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 6*16]);
        h0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 7*16]);

        a1 = _mm_unpacklo_epi8(a0, b0);
        b1 = _mm_unpackhi_epi8(a0, b0);
        c1 = _mm_unpacklo_epi8(c0, d0);
        d1 = _mm_unpackhi_epi8(c0, d0);
        e1 = _mm_unpacklo_epi8(e0, f0);
        f1 = _mm_unpackhi_epi8(e0, f0);
        g1 = _mm_unpacklo_epi8(g0, h0);
        h1 = _mm_unpackhi_epi8(g0, h0);

        a0 = _mm_unpacklo_epi8(a1, b1);
        b0 = _mm_unpackhi_epi8(a1, b1);
        c0 = _mm_unpacklo_epi8(c1, d1);
        d0 = _mm_unpackhi_epi8(c1, d1);
        e0 = _mm_unpacklo_epi8(e1, f1);
        f0 = _mm_unpackhi_epi8(e1, f1);
        g0 = _mm_unpacklo_epi8(g1, h1);
        h0 = _mm_unpackhi_epi8(g1, h1);

        a1 = _mm_unpacklo_epi32(a0, c0);
        b1 = _mm_unpackhi_epi32(a0, c0);
        c1 = _mm_unpacklo_epi32(b0, d0);
        d1 = _mm_unpackhi_epi32(b0, d0);
        e1 = _mm_unpacklo_epi32(e0, g0);
        f1 = _mm_unpackhi_epi32(e0, g0);
        g1 = _mm_unpacklo_epi32(f0, h0);
        h1 = _mm_unpackhi_epi32(f0, h0);

        a0 = _mm_unpacklo_epi64(a1, e1);
        b0 = _mm_unpackhi_epi64(a1, e1);
        c0 = _mm_unpacklo_epi64(b1, f1);
        d0 = _mm_unpackhi_epi64(b1, f1);
        e0 = _mm_unpacklo_epi64(c1, g1);
        f0 = _mm_unpackhi_epi64(c1, g1);
        g0 = _mm_unpacklo_epi64(d1, h1);
        h0 = _mm_unpackhi_epi64(d1, h1);

        _mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
        _mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
        _mm_storeu_si128((__m128i *) &out_b[2*size + ii], c0);
        _mm_storeu_si128((__m128i *) &out_b[3*size + ii], d0);
        _mm_storeu_si128((__m128i *) &out_b[4*size + ii], e0);
        _mm_storeu_si128((__m128i *) &out_b[5*size + ii], f0);
        _mm_storeu_si128((__m128i *) &out_b[6*size + ii], g0);
        _mm_storeu_si128((__m128i *) &out_b[7*size + ii], h0);
    }

    return bshuf_trans_byte_elem_remainder(in, out, size, 8,
            size - size % 16);
}


/* Transpose bytes within elements using best SSE algorithm available. */
int64_t bshuf_trans_byte_elem_SSE(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    int64_t count;

    // Trivial cases: power of 2 bytes.
    switch (elem_size) {
        case 1:
            count = bshuf_copy(in, out, size, elem_size);
            return count;
        case 2:
            count = bshuf_trans_byte_elem_SSE_16(in, out, size);
            return count;
        case 4:
            count = bshuf_trans_byte_elem_SSE_32(in, out, size);
            return count;
        case 8:
            count = bshuf_trans_byte_elem_SSE_64(in, out, size);
            return count;
    }

    // Worst case: odd number of bytes. Turns out that this is faster for
    // (odd * 2) byte elements as well (hence % 4).
    if (elem_size % 4) {
        count = bshuf_trans_byte_elem_scal(in, out, size, elem_size);
        return count;
    }

    // Multiple of power of 2: transpose hierarchically.
    {
        size_t nchunk_elem;
        void* tmp_buf = malloc(size * elem_size);
        if (tmp_buf == NULL) return -1;

        if ((elem_size % 8) == 0) {
            nchunk_elem = elem_size / 8;
            // Split each element into 8-byte chunks, transpose those with
            // the vector kernel, then regroup the chunk rows.
            TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int64_t);
            count = bshuf_trans_byte_elem_SSE_64(out, tmp_buf,
                    size * nchunk_elem);
            bshuf_trans_elem(tmp_buf, out, 8, nchunk_elem, size);
        } else if ((elem_size % 4) == 0) {
            nchunk_elem = elem_size / 4;
            TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int32_t);
            count = bshuf_trans_byte_elem_SSE_32(out, tmp_buf,
                    size * nchunk_elem);
            bshuf_trans_elem(tmp_buf, out, 4, nchunk_elem, size);
        } else {
            // Not used since scalar algorithm is faster.
            nchunk_elem = elem_size / 2;
            TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int16_t);
            count = bshuf_trans_byte_elem_SSE_16(out, tmp_buf,
                    size * nchunk_elem);
            bshuf_trans_elem(tmp_buf, out, 2, nchunk_elem, size);
        }

        free(tmp_buf);
        return count;
    }
}


/* Transpose bits within bytes. */
/* SSE2 bit/byte transpose: peel the MSB of each of 16 bytes with
 * _mm_movemask_epi8, shift left, repeat 8 times -- one 16-bit slice per
 * bit plane; the scalar remainder finishes the nbyte%16 tail. */
int64_t bshuf_trans_bit_byte_SSE(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    size_t ii, kk;
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;
    uint16_t* out_ui16;

    int64_t count;

    size_t nbyte = elem_size * size;

    CHECK_MULT_EIGHT(nbyte);

    __m128i xmm;
    int32_t bt;

    for (ii = 0; ii + 15 < nbyte; ii += 16) {
        xmm = _mm_loadu_si128((__m128i *) &in_b[ii]);
        for (kk = 0; kk < 8; kk++) {
            bt = _mm_movemask_epi8(xmm);
            xmm = _mm_slli_epi16(xmm, 1);
            // kk-th pass extracts bit (7-kk): write into that bit row.
            out_ui16 = (uint16_t*) &out_b[((7 - kk) * nbyte + ii) / 8];
            *out_ui16 = bt;
        }
    }
    count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size,
            nbyte - nbyte % 16);
    return count;
}


/* Transpose bits within elements (SSE pipeline):
 * byte-transpose -> bit/byte-transpose -> bitrow regroup.
 * Returns bytes processed or a negative error code (-1 on alloc failure). */
int64_t bshuf_trans_bit_elem_SSE(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    int64_t count;

    CHECK_MULT_EIGHT(size);

    void* tmp_buf = malloc(size * elem_size);
    if (tmp_buf == NULL) return -1;

    count = bshuf_trans_byte_elem_SSE(in, out, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);    // frees tmp_buf and returns on error
    count = bshuf_trans_bit_byte_SSE(out, tmp_buf, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);

    free(tmp_buf);

    return count;
}


/* For data organized into a row for each bit (8 * elem_size rows), transpose
 * the bytes.
 */
int64_t bshuf_trans_byte_bitrow_SSE(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    size_t ii, jj;
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;

    CHECK_MULT_EIGHT(size);

    size_t nrows = 8 * elem_size;
    size_t nbyte_row = size / 8;

    __m128i a0, b0, c0, d0, e0, f0, g0, h0;
    __m128i a1, b1, c1, d1, e1, f1, g1, h1;
    __m128 *as, *bs, *cs, *ds, *es, *fs, *gs, *hs;

    /* Transpose 8 rows x 16 columns of bytes per inner iteration. */
    for (ii = 0; ii + 7 < nrows; ii += 8) {
        for (jj = 0; jj + 15 < nbyte_row; jj += 16) {
            a0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 0)*nbyte_row + jj]);
            b0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 1)*nbyte_row + jj]);
            c0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 2)*nbyte_row + jj]);
            d0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 3)*nbyte_row + jj]);
            e0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 4)*nbyte_row + jj]);
            f0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 5)*nbyte_row + jj]);
            g0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 6)*nbyte_row + jj]);
            h0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 7)*nbyte_row + jj]);

            /* 8/16/32-bit interleave rounds of the transpose network;
             * ordering is load-bearing — do not reorder. */
            a1 = _mm_unpacklo_epi8(a0, b0);
            b1 = _mm_unpacklo_epi8(c0, d0);
            c1 = _mm_unpacklo_epi8(e0, f0);
            d1 = _mm_unpacklo_epi8(g0, h0);
            e1 = _mm_unpackhi_epi8(a0, b0);
            f1 = _mm_unpackhi_epi8(c0, d0);
            g1 = _mm_unpackhi_epi8(e0, f0);
            h1 = _mm_unpackhi_epi8(g0, h0);

            a0 = _mm_unpacklo_epi16(a1, b1);
            b0 = _mm_unpacklo_epi16(c1, d1);
            c0 = _mm_unpackhi_epi16(a1, b1);
            d0 = _mm_unpackhi_epi16(c1, d1);
            e0 = _mm_unpacklo_epi16(e1, f1);
            f0 = _mm_unpacklo_epi16(g1, h1);
            g0 = _mm_unpackhi_epi16(e1, f1);
            h0 = _mm_unpackhi_epi16(g1, h1);

            a1 = _mm_unpacklo_epi32(a0, b0);
            b1 = _mm_unpackhi_epi32(a0, b0);
            c1 = _mm_unpacklo_epi32(c0, d0);
            d1 = _mm_unpackhi_epi32(c0, d0);
            e1 = _mm_unpacklo_epi32(e0, f0);
            f1 = _mm_unpackhi_epi32(e0, f0);
            g1 = _mm_unpacklo_epi32(g0, h0);
            h1 = _mm_unpackhi_epi32(g0, h0);

            // We don't have a storeh instruction for integers, so interpret
            // as a float. Have a storel (_mm_storel_epi64).
            as = (__m128 *) &a1;
            bs = (__m128 *) &b1;
            cs = (__m128 *) &c1;
            ds = (__m128 *) &d1;
            es = (__m128 *) &e1;
            fs = (__m128 *) &f1;
            gs = (__m128 *) &g1;
            hs = (__m128 *) &h1;

            /* Each register now holds two 8-byte output columns: the low
             * half goes to even columns (storel), the high half to odd
             * columns (storeh). */
            _mm_storel_pi((__m64 *) &out_b[(jj + 0) * nrows + ii], *as);
            _mm_storel_pi((__m64 *) &out_b[(jj + 2) * nrows + ii], *bs);
            _mm_storel_pi((__m64 *) &out_b[(jj + 4) * nrows + ii], *cs);
            _mm_storel_pi((__m64 *) &out_b[(jj + 6) * nrows + ii], *ds);
            _mm_storel_pi((__m64 *) &out_b[(jj + 8) * nrows + ii], *es);
            _mm_storel_pi((__m64 *) &out_b[(jj + 10) * nrows + ii], *fs);
            _mm_storel_pi((__m64 *) &out_b[(jj + 12) * nrows + ii], *gs);
            _mm_storel_pi((__m64 *) &out_b[(jj + 14) * nrows + ii], *hs);

            _mm_storeh_pi((__m64 *) &out_b[(jj + 1) * nrows + ii], *as);
            _mm_storeh_pi((__m64 *) &out_b[(jj + 3) * nrows + ii], *bs);
            _mm_storeh_pi((__m64 *) &out_b[(jj + 5) * nrows + ii], *cs);
            _mm_storeh_pi((__m64 *) &out_b[(jj + 7) * nrows + ii], *ds);
            _mm_storeh_pi((__m64 *) &out_b[(jj + 9) * nrows + ii], *es);
            _mm_storeh_pi((__m64 *) &out_b[(jj + 11) * nrows + ii], *fs);
            _mm_storeh_pi((__m64 *) &out_b[(jj + 13) * nrows + ii], *gs);
            _mm_storeh_pi((__m64 *) &out_b[(jj + 15) * nrows + ii], *hs);
        }
        /* Scalar tail for row lengths that are not a multiple of 16. */
        for (jj = nbyte_row - nbyte_row % 16; jj < nbyte_row; jj ++) {
            out_b[jj * nrows + ii + 0] = in_b[(ii + 0)*nbyte_row + jj];
            out_b[jj * nrows + ii + 1] = in_b[(ii + 1)*nbyte_row + jj];
            out_b[jj * nrows + ii + 2] = in_b[(ii + 2)*nbyte_row + jj];
            out_b[jj * nrows + ii + 3] = in_b[(ii + 3)*nbyte_row + jj];
            out_b[jj * nrows + ii + 4] = in_b[(ii + 4)*nbyte_row + jj];
            out_b[jj * nrows + ii + 5] = in_b[(ii + 5)*nbyte_row + jj];
            out_b[jj * nrows + ii + 6] = in_b[(ii + 6)*nbyte_row + jj];
            out_b[jj * nrows + ii + 7] = in_b[(ii + 7)*nbyte_row + jj];
        }
    }
    return size * elem_size;
}


/* Shuffle bits within the bytes of eight element blocks.
 */
int64_t bshuf_shuffle_bit_eightelem_SSE(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    CHECK_MULT_EIGHT(size);

    // With a bit of care, this could be written such that it is
    // in_buf = out_buf safe.
    const char* in_b = (const char*) in;
    uint16_t* out_ui16 = (uint16_t*) out;

    size_t ii, jj, kk;
    size_t nbyte = elem_size * size;

    __m128i xmm;
    int32_t bt;

    if (elem_size % 2) {
        /* Odd element sizes cannot use the 16-byte vector path. */
        bshuf_shuffle_bit_eightelem_scal(in, out, size, elem_size);
    } else {
        /* Walk 8-element blocks; within each, peel off one bit plane per
         * movemask/shift pass and scatter it to its element row. */
        for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
                ii += 8 * elem_size) {
            for (jj = 0; jj + 15 < 8 * elem_size; jj += 16) {
                xmm = _mm_loadu_si128((__m128i *) &in_b[ii + jj]);
                for (kk = 0; kk < 8; kk++) {
                    bt = _mm_movemask_epi8(xmm);
                    xmm = _mm_slli_epi16(xmm, 1);
                    size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
                    out_ui16[ind / 2] = bt;
                }
            }
        }
    }
    return size * elem_size;
}


/* Untranspose bits within elements.
 *
 * Inverse of bshuf_trans_bit_elem_SSE: undo the byte-bitrow transpose,
 * then re-shuffle bits back into each eight-element block. Returns -1 on
 * allocation failure, otherwise propagates stage error codes. */
int64_t bshuf_untrans_bit_elem_SSE(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    int64_t count;

    CHECK_MULT_EIGHT(size);

    void* tmp_buf = malloc(size * elem_size);
    if (tmp_buf == NULL) return -1;

    count = bshuf_trans_byte_bitrow_SSE(in, tmp_buf, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_shuffle_bit_eightelem_SSE(tmp_buf, out, size, elem_size);

    free(tmp_buf);

    return count;
}

#else // #ifdef USESSE2

/* Stubs compiled when SSE2 is unavailable: every entry point reports the
 * "SSE2 not supported" error code (-11). */

int64_t bshuf_untrans_bit_elem_SSE(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -11;
}


int64_t bshuf_trans_bit_elem_SSE(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -11;
}


int64_t bshuf_trans_byte_bitrow_SSE(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -11;
}


int64_t bshuf_trans_bit_byte_SSE(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -11;
}


int64_t bshuf_trans_byte_elem_SSE(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -11;
}


int64_t bshuf_trans_byte_elem_SSE_64(const void* in, void* out,
        const size_t size) {
    return -11;
}


int64_t bshuf_trans_byte_elem_SSE_32(const void* in, void* out,
        const size_t size) {
    return -11;
}


int64_t bshuf_trans_byte_elem_SSE_16(const void* in, void* out,
        const size_t size) {
    return -11;
}


int64_t bshuf_shuffle_bit_eightelem_SSE(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -11;
}

#endif // #ifdef USESSE2


/* ---- Code that requires AVX2. Intel Haswell (2013) and later. ---- */

/* ---- Worker code that uses AVX2 ----
 *
 * The following code makes use of the AVX2 instruction set and specialized
 * 32 byte registers. The AVX2 instructions are present on newer x86
 * processors. The first Intel processor microarchitecture supporting AVX2 was
 * Haswell (2013).
 *
 */

#ifdef USEAVX2

/* Transpose bits within bytes.
 *
 * AVX2 variant of the SSE kernel: 32 input bytes per iteration, one
 * 32-bit bit-plane word per movemask/shift pass. */
int64_t bshuf_trans_bit_byte_AVX(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    size_t ii, kk;
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;
    int32_t* out_i32;

    size_t nbyte = elem_size * size;

    int64_t count;

    __m256i ymm;
    int32_t bt;

    for (ii = 0; ii + 31 < nbyte; ii += 32) {
        ymm = _mm256_loadu_si256((__m256i *) &in_b[ii]);
        for (kk = 0; kk < 8; kk++) {
            bt = _mm256_movemask_epi8(ymm);
            ymm = _mm256_slli_epi16(ymm, 1);
            out_i32 = (int32_t*) &out_b[((7 - kk) * nbyte + ii) / 8];
            *out_i32 = bt;
        }
    }
    count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size,
            nbyte - nbyte % 32);
    return count;
}


/* Transpose bits within elements.
 */
int64_t bshuf_trans_bit_elem_AVX(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    int64_t count;

    CHECK_MULT_EIGHT(size);

    void* tmp_buf = malloc(size * elem_size);
    if (tmp_buf == NULL) return -1;

    /* Byte-within-element stage reuses the SSE kernel; only the
     * bit-within-byte stage has an AVX2 specialization. */
    count = bshuf_trans_byte_elem_SSE(in, out, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_trans_bit_byte_AVX(out, tmp_buf, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);

    free(tmp_buf);

    return count;
}


/* For data organized into a row for each bit (8 * elem_size rows), transpose
 * the bytes.
 *
 * Requires elem_size to be a multiple of 4; otherwise falls back to the
 * SSE implementation. Works on 32-column panels of 32 rows (8 rows x 4
 * element-groups) buffered through ymm_storeage. */
int64_t bshuf_trans_byte_bitrow_AVX(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    size_t hh, ii, jj, kk, mm;
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;

    CHECK_MULT_EIGHT(size);

    size_t nrows = 8 * elem_size;
    size_t nbyte_row = size / 8;

    if (elem_size % 4) return bshuf_trans_byte_bitrow_SSE(in, out,
            size, elem_size);

    __m256i ymm_0[8];
    __m256i ymm_1[8];
    __m256i ymm_storeage[8][4];

    for (jj = 0; jj + 31 < nbyte_row; jj += 32) {
        for (ii = 0; ii + 3 < elem_size; ii += 4) {
            for (hh = 0; hh < 4; hh ++) {

                for (kk = 0; kk < 8; kk ++){
                    ymm_0[kk] = _mm256_loadu_si256((__m256i *) &in_b[
                            (ii * 8 + hh * 8 + kk) * nbyte_row + jj]);
                }

                /* 8-, 16-, and 32-bit interleave rounds; index arithmetic
                 * encodes the butterfly network — do not reorder. */
                for (kk = 0; kk < 4; kk ++){
                    ymm_1[kk] = _mm256_unpacklo_epi8(ymm_0[kk * 2],
                            ymm_0[kk * 2 + 1]);
                    ymm_1[kk + 4] = _mm256_unpackhi_epi8(ymm_0[kk * 2],
                            ymm_0[kk * 2 + 1]);
                }

                for (kk = 0; kk < 2; kk ++){
                    for (mm = 0; mm < 2; mm ++){
                        ymm_0[kk * 4 + mm] = _mm256_unpacklo_epi16(
                                ymm_1[kk * 4 + mm * 2],
                                ymm_1[kk * 4 + mm * 2 + 1]);
                        ymm_0[kk * 4 + mm + 2] = _mm256_unpackhi_epi16(
                                ymm_1[kk * 4 + mm * 2],
                                ymm_1[kk * 4 + mm * 2 + 1]);
                    }
                }

                for (kk = 0; kk < 4; kk ++){
                    ymm_1[kk * 2] = _mm256_unpacklo_epi32(ymm_0[kk * 2],
                            ymm_0[kk * 2 + 1]);
                    ymm_1[kk * 2 + 1] = _mm256_unpackhi_epi32(ymm_0[kk * 2],
                            ymm_0[kk * 2 + 1]);
                }

                for (kk = 0; kk < 8; kk ++){
                    ymm_storeage[kk][hh] = ymm_1[kk];
                }
            }

            for (mm = 0; mm < 8; mm ++) {

                for (kk = 0; kk < 4; kk ++){
                    ymm_0[kk] = ymm_storeage[mm][kk];
                }

                ymm_1[0] = _mm256_unpacklo_epi64(ymm_0[0], ymm_0[1]);
                ymm_1[1] = _mm256_unpacklo_epi64(ymm_0[2], ymm_0[3]);
                ymm_1[2] = _mm256_unpackhi_epi64(ymm_0[0], ymm_0[1]);
                ymm_1[3] = _mm256_unpackhi_epi64(ymm_0[2], ymm_0[3]);

                /* Cross-lane fixup: 32 selects the low 128-bit lanes of
                 * both operands, 49 the high lanes. */
                ymm_0[0] = _mm256_permute2x128_si256(ymm_1[0], ymm_1[1], 32);
                ymm_0[1] = _mm256_permute2x128_si256(ymm_1[2], ymm_1[3], 32);
                ymm_0[2] = _mm256_permute2x128_si256(ymm_1[0], ymm_1[1], 49);
                ymm_0[3] = _mm256_permute2x128_si256(ymm_1[2], ymm_1[3], 49);

                _mm256_storeu_si256((__m256i *) &out_b[
                        (jj + mm * 2 + 0 * 16) * nrows + ii * 8], ymm_0[0]);
                _mm256_storeu_si256((__m256i *) &out_b[
                        (jj + mm * 2 + 0 * 16 + 1) * nrows + ii * 8], ymm_0[1]);
                _mm256_storeu_si256((__m256i *) &out_b[
                        (jj + mm * 2 + 1 * 16) * nrows + ii * 8], ymm_0[2]);
                _mm256_storeu_si256((__m256i *) &out_b[
                        (jj + mm * 2 + 1 * 16 + 1) * nrows + ii * 8], ymm_0[3]);
            }
        }
    }
    /* Scalar tail for columns past the last full 32-byte panel. */
    for (ii = 0; ii < nrows; ii ++ ) {
        for (jj = nbyte_row - nbyte_row % 32; jj < nbyte_row; jj ++) {
            out_b[jj * nrows + ii] = in_b[ii * nbyte_row + jj];
        }
    }
    return size * elem_size;
}


/* Shuffle bits within the bytes of eight element blocks.
 *
 * AVX2 variant; requires elem_size divisible by 4, else defers to the
 * SSE implementation. */
int64_t bshuf_shuffle_bit_eightelem_AVX(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    CHECK_MULT_EIGHT(size);

    // With a bit of care, this could be written such that it is
    // in_buf = out_buf safe.
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;

    size_t ii, jj, kk;
    size_t nbyte = elem_size * size;

    __m256i ymm;
    int32_t bt;

    if (elem_size % 4) {
        return bshuf_shuffle_bit_eightelem_SSE(in, out, size, elem_size);
    } else {
        for (jj = 0; jj + 31 < 8 * elem_size; jj += 32) {
            for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
                    ii += 8 * elem_size) {
                ymm = _mm256_loadu_si256((__m256i *) &in_b[ii + jj]);
                for (kk = 0; kk < 8; kk++) {
                    bt = _mm256_movemask_epi8(ymm);
                    ymm = _mm256_slli_epi16(ymm, 1);
                    size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
                    * (int32_t *) &out_b[ind] = bt;
                }
            }
        }
    }
    return size * elem_size;
}


/* Untranspose bits within elements.
 *
 * Inverse of bshuf_trans_bit_elem_AVX; -1 on allocation failure. */
int64_t bshuf_untrans_bit_elem_AVX(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    int64_t count;

    CHECK_MULT_EIGHT(size);

    void* tmp_buf = malloc(size * elem_size);
    if (tmp_buf == NULL) return -1;

    count = bshuf_trans_byte_bitrow_AVX(in, tmp_buf, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_shuffle_bit_eightelem_AVX(tmp_buf, out, size, elem_size);

    free(tmp_buf);

    return count;
}

#else // #ifdef USEAVX2

/* Stubs compiled when AVX2 is unavailable: every entry point reports the
 * "AVX2 not supported" error code (-12). */

int64_t bshuf_trans_bit_byte_AVX(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -12;
}


int64_t bshuf_trans_bit_elem_AVX(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -12;
}


int64_t bshuf_trans_byte_bitrow_AVX(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -12;
}


int64_t bshuf_shuffle_bit_eightelem_AVX(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -12;
}


int64_t bshuf_untrans_bit_elem_AVX(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -12;
}

#endif // #ifdef USEAVX2


/* ---- Drivers selecting best instruction set at compile time.
 ---- */

/* Transpose bits within elements using the best kernel compiled in
 * (AVX2 > SSE2 > NEON > scalar; chosen at compile time). */
int64_t bshuf_trans_bit_elem(const void* in, void* out, const size_t size,
        const size_t elem_size) {

    int64_t count;
#ifdef USEAVX2
    count = bshuf_trans_bit_elem_AVX(in, out, size, elem_size);
#elif defined(USESSE2)
    count = bshuf_trans_bit_elem_SSE(in, out, size, elem_size);
#elif defined(USEARMNEON)
    count = bshuf_trans_bit_elem_NEON(in, out, size, elem_size);
#else
    count = bshuf_trans_bit_elem_scal(in, out, size, elem_size);
#endif
    return count;
}


/* Untranspose bits within elements using the best kernel compiled in. */
int64_t bshuf_untrans_bit_elem(const void* in, void* out, const size_t size,
        const size_t elem_size) {

    int64_t count;
#ifdef USEAVX2
    count = bshuf_untrans_bit_elem_AVX(in, out, size, elem_size);
#elif defined(USESSE2)
    count = bshuf_untrans_bit_elem_SSE(in, out, size, elem_size);
#elif defined(USEARMNEON)
    count = bshuf_untrans_bit_elem_NEON(in, out, size, elem_size);
#else
    count = bshuf_untrans_bit_elem_scal(in, out, size, elem_size);
#endif
    return count;
}


/* ---- Wrappers for implementing blocking ---- */

/* Wrap a function for processing a single block to process an entire buffer in
 * parallel.
 */
/* Applies `fun` to consecutive blocks of `block_size` elements (optionally
 * in parallel via OpenMP), then to one final short block rounded down to a
 * multiple of BSHUF_BLOCKED_MULT, and finally memcpy's any leftover bytes
 * unchanged. Returns the cumulative element count plus leftover bytes, a
 * negative error code from `fun`, or -81 if block_size is not a multiple
 * of BSHUF_BLOCKED_MULT. block_size == 0 selects the default block size.
 * The ioc_chain serializes in/out pointer hand-off between threads. */
int64_t bshuf_blocked_wrap_fun(bshufBlockFunDef fun, const void* in, void* out,
        const size_t size, const size_t elem_size, size_t block_size) {

    omp_size_t ii = 0;
    int64_t err = 0;
    int64_t count, cum_count=0;
    size_t last_block_size;
    size_t leftover_bytes;
    size_t this_iter;
    char *last_in;
    char *last_out;

    ioc_chain C;
    ioc_init(&C, in, out);

    if (block_size == 0) {
        block_size = bshuf_default_block_size(elem_size);
    }
    if (block_size % BSHUF_BLOCKED_MULT) return -81;

#if defined(_OPENMP)
    char *cenv;
    int nthreads = omp_get_num_threads();
    /* BITSHUFFLE_OMP_NUM_THREADS overrides the OpenMP default. */
    cenv = getenv("BITSHUFFLE_OMP_NUM_THREADS");
    if (cenv!=NULL) {
        nthreads = atoi(cenv);
    }
#pragma omp parallel for schedule(dynamic, 1) \
        private(count) num_threads(nthreads) reduction(+ : cum_count)
#endif
    for (ii = 0; ii < (omp_size_t)( size / block_size ); ii ++) {
        count = fun(&C, block_size, elem_size);
        if (count < 0) err = count;
        cum_count += count;
    }

    /* Trailing partial block, truncated to a legal multiple. */
    last_block_size = size % block_size;
    last_block_size = last_block_size - last_block_size % BSHUF_BLOCKED_MULT;
    if (last_block_size) {
        count = fun(&C, last_block_size, elem_size);
        if (count < 0) err = count;
        cum_count += count;
    }

    if (err < 0) return err;

    /* Bytes that fit in no block at all are copied through verbatim. */
    leftover_bytes = size % BSHUF_BLOCKED_MULT * elem_size;
    last_in = (char *) ioc_get_in(&C, &this_iter);
    ioc_set_next_in(&C, &this_iter, (void *) (last_in + leftover_bytes));
    last_out = (char *) ioc_get_out(&C, &this_iter);
    ioc_set_next_out(&C, &this_iter, (void *) (last_out + leftover_bytes));

    memcpy(last_out, last_in, leftover_bytes);

    ioc_destroy(&C);

    return cum_count + leftover_bytes;
}


/* Bitshuffle a single block.
 */
/* Claims the next in/out slots from the ioc_chain, advances both cursors
 * by size * elem_size bytes, and bit-transposes the block. */
int64_t bshuf_bitshuffle_block(ioc_chain *C_ptr,
        const size_t size, const size_t elem_size) {

    size_t this_iter;
    const void *in;
    void *out;
    int64_t count;

    in = ioc_get_in(C_ptr, &this_iter);
    ioc_set_next_in(C_ptr, &this_iter,
            (void*) ((char*) in + size * elem_size));
    out = ioc_get_out(C_ptr, &this_iter);
    ioc_set_next_out(C_ptr, &this_iter,
            (void *) ((char *) out + size * elem_size));

    count = bshuf_trans_bit_elem(in, out, size, elem_size);
    return count;
}


/* Bitunshuffle a single block.
 *
 * Mirror of bshuf_bitshuffle_block using the inverse transform. */
int64_t bshuf_bitunshuffle_block(ioc_chain* C_ptr,
        const size_t size, const size_t elem_size) {

    size_t this_iter;
    const void *in;
    void *out;
    int64_t count;

    in = ioc_get_in(C_ptr, &this_iter);
    ioc_set_next_in(C_ptr, &this_iter,
            (void*) ((char*) in + size * elem_size));
    out = ioc_get_out(C_ptr, &this_iter);
    ioc_set_next_out(C_ptr, &this_iter,
            (void *) ((char *) out + size * elem_size));

    count = bshuf_untrans_bit_elem(in, out, size, elem_size);
    return count;
}


/* Write a 64 bit unsigned integer to a buffer in big endian order. */
void bshuf_write_uint64_BE(void* buf, uint64_t num) {
    int ii;
    uint8_t* b = (uint8_t*) buf;
    uint64_t pow28 = 1 << 8;   /* 256: one byte per division step */
    for (ii = 7; ii >= 0; ii--) {
        b[ii] = num % pow28;
        num = num / pow28;
    }
}


/* Read a 64 bit unsigned integer from a buffer big endian order. */
uint64_t bshuf_read_uint64_BE(void* buf) {
    int ii;
    uint8_t* b = (uint8_t*) buf;
    uint64_t num = 0, pow28 = 1 << 8, cp = 1;
    for (ii = 7; ii >= 0; ii--) {
        num += b[ii] * cp;
        cp *= pow28;
    }
    return num;
}


/* Write a 32 bit unsigned integer to a buffer in big endian order. */
void bshuf_write_uint32_BE(void* buf, uint32_t num) {
    int ii;
    uint8_t* b = (uint8_t*) buf;
    uint32_t pow28 = 1 << 8;
    for (ii = 3; ii >= 0; ii--) {
        b[ii] = num % pow28;
        num = num / pow28;
    }
}


/* Read a 32 bit unsigned integer from a buffer big endian order.
*/ uint32_t bshuf_read_uint32_BE(const void* buf) { int ii; uint8_t* b = (uint8_t*) buf; uint32_t num = 0, pow28 = 1 << 8, cp = 1; for (ii = 3; ii >= 0; ii--) { num += b[ii] * cp; cp *= pow28; } return num; } /* ---- Public functions ---- * * See header file for description and usage. * */ size_t bshuf_default_block_size(const size_t elem_size) { // This function needs to be absolutely stable between versions. // Otherwise encoded data will not be decodable. size_t block_size = BSHUF_TARGET_BLOCK_SIZE_B / elem_size; // Ensure it is a required multiple. block_size = (block_size / BSHUF_BLOCKED_MULT) * BSHUF_BLOCKED_MULT; return MAX(block_size, BSHUF_MIN_RECOMMEND_BLOCK); } int64_t bshuf_bitshuffle(const void* in, void* out, const size_t size, const size_t elem_size, size_t block_size) { return bshuf_blocked_wrap_fun(&bshuf_bitshuffle_block, in, out, size, elem_size, block_size); } int64_t bshuf_bitunshuffle(const void* in, void* out, const size_t size, const size_t elem_size, size_t block_size) { return bshuf_blocked_wrap_fun(&bshuf_bitunshuffle_block, in, out, size, elem_size, block_size); } #undef TRANS_BIT_8X8 #undef TRANS_ELEM_TYPE #undef MAX #undef CHECK_MULT_EIGHT #undef CHECK_ERR_FREE #undef USESSE2 #undef USEAVX2
// ==== CB_PackingVectorNode.h ====
#ifndef _CB_PACK_V_NODE_H_
#define _CB_PACK_V_NODE_H_

/*
####################################################################################
#
# CBrick
#
# Copyright (c) 2017-2020 Research Institute for Information Technology(RIIT),
# Kyushu University.  All rights reserved.
#
####################################################################################
*/

/*
 * @file   CB_PackingVectorNode.h
 * @brief  BrickComm class
 */

// #########################################################
/*
 * @brief pack send data for I direction
 * @param [in]  array  source array
 * @param [in]  gc     number of guide cell layer to be sent
 * @param [out] sendm  send buffer of I- direction
 * @param [out] sendp  send buffer of I+ direction
 * @param [in]  nIDm   Rank number of I- direction
 * @param [in]  nIDp   Rank number of I+ direction
 */
template <class T>
void BrickComm::pack_VXnode(const T *array,
                            const int gc,
                            T *sendm,
                            T *sendp,
                            const int nIDm,
                            const int nIDp)
{
  int NI = size[0];
  int NJ = size[1];
  int NK = size[2];
  int VC = halo_width;

  /* Node-centered overlap: the boundary node is shared by both ranks, so
     packing starts one node inside (i+1 / NI-2+i), not at the face itself.

       <--gc-->
      rankA  [NI-3] [NI-2] [NI-1] [NI] [NI+1]
      -----+------+------|------+------+-------> i
      rankB          [-2]  [-1]   [0]   [1] [2]
                                  <--gc-->
  */

  // A negative rank means no neighbor on that side; skip packing.
  if( nIDm >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {           // l: vector component (x,y,z)
      for( int k=0; k<NK; k++ ){
        for( int j=0; j<NJ; j++ ){
#pragma novector
          for( int i=0; i<gc; i++ ){
            sendm[_IDX_VI(i,j,k,l,NJ,NK,gc)] = array[_IDX_V3D(i+1,j,k,l,NI,NJ,NK,VC)];
          }
        }
      }
    }
  }

  if( nIDp >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {
      for( int k=0; k<NK; k++ ){
        for( int j=0; j<NJ; j++ ){
#pragma novector
          for( int i=0; i<gc; i++ ){
            sendp[_IDX_VI(i,j,k,l,NJ,NK,gc)] = array[_IDX_V3D(NI-2+i,j,k,l,NI,NJ,NK,VC)];
          }
        }
      }
    }
  }
}


// #########################################################
/*
 * @brief unpack send data for I direction
 * @param [out] array  dest array
 * @param [in]  gc     number of guide cell layer to be sent
 * @param [in]  recvm  recv buffer of I- direction
 * @param [in]  recvp  recv buffer of I+ direction
 * @param [in]  nIDm
 *                     Rank number of I- direction
 * @param [in]  nIDp   Rank number of I+ direction
 */
template <class T>
void BrickComm::unpack_VXnode(T *array,
                              const int gc,
                              const T *recvm,
                              const T *recvp,
                              const int nIDm,
                              const int nIDp)
{
  int NI = size[0];
  int NJ = size[1];
  int NK = size[2];
  int VC = halo_width;

  // Received layers land in the guide cells: i-1 downward on the minus
  // side, NI+i upward on the plus side (node-centered layout).
  if( nIDm >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {           // l: vector component (x,y,z)
      for( int k=0; k<NK; k++ ){
        for( int j=0; j<NJ; j++ ){
#pragma novector
          for( int i=0; i<gc; i++ ){
            array[_IDX_V3D(i-1,j,k,l,NI,NJ,NK,VC)] = recvm[_IDX_VI(i,j,k,l,NJ,NK,gc)];
          }
        }
      }
    }
  }

  if( nIDp >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {
      for( int k=0; k<NK; k++ ){
        for( int j=0; j<NJ; j++ ){
#pragma novector
          for( int i=0; i<gc; i++ ){
            array[_IDX_V3D(NI+i,j,k,l,NI,NJ,NK,VC)] = recvp[_IDX_VI(i,j,k,l,NJ,NK,gc)];
          }
        }
      }
    }
  }
}


// #########################################################
/*
 * @brief pack send data for J direction
 * @param [in]  array  source array
 * @param [in]  gc     number of guide cell layer to be sent
 * @param [out] sendm  send buffer of J- direction
 * @param [out] sendp  send buffer of J+ direction
 * @param [in]  nIDm   Rank number of J- direction
 * @param [in]  nIDp   Rank number of J+ direction
 */
template <class T>
void BrickComm::pack_VYnode(const T *array,
                            const int gc,
                            T *sendm,
                            T *sendp,
                            const int nIDm,
                            const int nIDp)
{
  int NI = size[0];
  int NJ = size[1];
  int NK = size[2];
  int VC = halo_width;

  // Same node-centered offset convention as pack_VXnode, applied along J.
  if( nIDm >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {
      for( int k=0; k<NK; k++ ){
        for( int j=0; j<gc; j++ ){
#pragma novector
          for( int i=0; i<NI; i++ ){
            sendm[_IDX_VJ(i,j,k,l,NI,NK,gc)] = array[_IDX_V3D(i,j+1,k,l,NI,NJ,NK,VC)];
          }
        }
      }
    }
  }

  if( nIDp >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {
      for( int k=0; k<NK; k++ ){
        for( int j=0; j<gc; j++ ){
#pragma novector
          for( int i=0; i<NI; i++ ){
            sendp[_IDX_VJ(i,j,k,l,NI,NK,gc)] = array[_IDX_V3D(i,NJ-2+j,k,l,NI,NJ,NK,VC)];
          }
        }
      }
    }
  }
}


//
// #########################################################
/*
 * @brief unpack send data for J direction
 * @param [out] array  dest array
 * @param [in]  gc     number of guide cell layer to be sent
 * @param [in]  recvm  recv buffer of J- direction
 * @param [in]  recvp  recv buffer of J+ direction
 * @param [in]  nIDm   Rank number of J- direction
 * @param [in]  nIDp   Rank number of J+ direction
 */
template <class T>
void BrickComm::unpack_VYnode(T *array,
                              const int gc,
                              const T *recvm,
                              const T *recvp,
                              const int nIDm,
                              const int nIDp)
{
  int NI = size[0];
  int NJ = size[1];
  int NK = size[2];
  int VC = halo_width;

  // Guide cells fill at j-1 (minus side) and NJ+j (plus side).
  if( nIDm >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {           // l: vector component (x,y,z)
      for( int k=0; k<NK; k++ ){
        for( int j=0; j<gc; j++ ){
#pragma novector
          for( int i=0; i<NI; i++ ){
            array[_IDX_V3D(i,j-1,k,l,NI,NJ,NK,VC)] = recvm[_IDX_VJ(i,j,k,l,NI,NK,gc)];
          }
        }
      }
    }
  }

  if( nIDp >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {
      for( int k=0; k<NK; k++ ){
        for( int j=0; j<gc; j++ ){
#pragma novector
          for( int i=0; i<NI; i++ ){
            array[_IDX_V3D(i,NJ+j,k,l,NI,NJ,NK,VC)] = recvp[_IDX_VJ(i,j,k,l,NI,NK,gc)];
          }
        }
      }
    }
  }
}


// #########################################################
/*
 * @brief pack send data for K direction
 * @param [in]  array  source array
 * @param [in]  gc     number of guide cell layer actually to be sent
 * @param [out] sendm  send buffer of K- direction
 * @param [out] sendp  send buffer of K+ direction
 * @param [in]  nIDm   Rank number of K- direction
 * @param [in]  nIDp   Rank number of K+ direction
 */
template <class T>
void BrickComm::pack_VZnode(const T *array,
                            const int gc,
                            T *sendm,
                            T *sendp,
                            const int nIDm,
                            const int nIDp)
{
  int NI = size[0];
  int NJ = size[1];
  int NK = size[2];
  int VC = halo_width;

  // Same node-centered offset convention as pack_VXnode, applied along K.
  if( nIDm >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {
      for( int k=0; k<gc; k++ ){
        for( int j=0; j<NJ; j++ ){
#pragma novector
          for( int i=0; i<NI; i++ ){
            sendm[_IDX_VK(i,j,k,l,NI,NJ,gc)] = array[_IDX_V3D(i,j,k+1,l,NI,NJ,NK,VC)];
          }
        }
      }
    }
  }

  if( nIDp >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {
      for( int k=0; k<gc; k++ ){
        for( int j=0; j<NJ; j++ ){
#pragma novector
          for( int i=0; i<NI; i++ ){
            sendp[_IDX_VK(i,j,k,l,NI,NJ,gc)] = array[_IDX_V3D(i,j,NK-2+k,l,NI,NJ,NK,VC)];
          }
        }
      }
    }
  }
}


// #########################################################
/*
 * @brief unpack send data for K direction
 * @param [out] array  dest array
 * @param [in]  gc     number of guide cell layer to be sent
 * @param [in]  recvm  recv buffer of K- direction
 * @param [in]  recvp  recv buffer of K+ direction
 * @param [in]  nIDm   Rank number of K- direction
 * @param [in]  nIDp   Rank number of K+ direction
 */
template <class T>
void BrickComm::unpack_VZnode(T *array,
                              const int gc,
                              const T *recvm,
                              const T *recvp,
                              const int nIDm,
                              const int nIDp)
{
  int NI = size[0];
  int NJ = size[1];
  int NK = size[2];
  int VC = halo_width;

  // Guide cells fill at k-1 (minus side) and NK+k (plus side).
  if( nIDm >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {
      for( int k=0; k<gc; k++ ){
        for( int j=0; j<NJ; j++ ){
#pragma novector
          for( int i=0; i<NI; i++ ){
            array[_IDX_V3D(i,j,k-1,l,NI,NJ,NK,VC)] = recvm[_IDX_VK(i,j,k,l,NI,NJ,gc)];
          }
        }
      }
    }
  }

  if( nIDp >= 0 )
  {
#pragma omp parallel for collapse(3)
    for (int l=0; l<3; l++) {
      for( int k=0; k<gc; k++ ){
        for( int j=0; j<NJ; j++ ){
#pragma novector
          for( int i=0; i<NI; i++ ){
            array[_IDX_V3D(i,j,NK+k,l,NI,NJ,NK,VC)] = recvp[_IDX_VK(i,j,k,l,NI,NJ,gc)];
          }
        }
      }
    }
  }
}


#ifdef _DIAGONAL_COMM
// #########################################################
/*
 * @brief pack send data for diagonal edge
 * @param [in]  array    source array
 * @param [in]  gc       number of guide cell layer to be sent
 * @param [out] sendbuf  send buffer
 * @param [out] recvbuf  recv buffer
 * @param [out] req      Array of MPI request
 * @retval true-success, false-fail
 *
 * For each of the 12 edge neighbors that exists (comm_tbl[dir] >= 0):
 * post the non-blocking receive FIRST, pack the edge strip, then post the
 * non-blocking send. sendbuf/recvbuf are carved into per-direction
 * segments by advancing `ptr` after each direction, so the loop order
 * here must match the unpack side exactly.
 */
template <class T>
bool BrickComm::pack_VEnode(T *array,
                            const int gc,
                            T *sendbuf,
                            T *recvbuf,
                            MPI_Request *req)
{
  int NI = size[0];
  int NJ = size[1];
  int NK = size[2];
  int VC = halo_width;
  int tag = 0;
  size_t ptr = 0;

  //// X edge ////
  for( int dir=int(E_mYmZ);dir<=int(E_pYpZ);dir++ )
  {
    if( comm_tbl[dir] >= 0 )
    {
      T *sendptr = &sendbuf[ptr];
      T *recvptr = &recvbuf[ptr];
      // Strip of (NI-1) interior nodes x gc x gc cells x 3 components.
      size_t sz = (NI-1) * gc * gc * 3;

      /* recv
      if ( MPI_SUCCESS != MPI_Irecv(recvptr,
                                    sz,
                                    dtype,
                                    comm_tbl[dir],
                                    tag,
                                    MPI_COMM_WORLD,
                                    &req[dir*2]) ) return false;
      */
      if ( !IrecvData(recvptr,
                      sz,
                      comm_tbl[dir],
                      &req[dir*2]) ) return false;

      // pack
      switch(dir)
      {
        case int(E_mYmZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
          for( int k=1; k<=gc; k++ ){
          for( int j=1; j<=gc; j++ ){
          for( int i=1; i<NI; i++ ){
            sendptr[_IDX_V3D(i-1,j-1,k-1,l,NI-1,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
          }}}}
          break;

        case int(E_pYmZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
          for( int k=1; k<=gc; k++ ){
          for( int j=NJ-gc; j<NJ; j++ ){
          for( int i=1; i<NI; i++ ){
            sendptr[_IDX_V3D(i-1,j-(NJ-gc),k-1,l,NI-1,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
          }}}}
          break;

        case int(E_mYpZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
          for( int k=NK-gc; k<NK; k++ ){
          for( int j=1; j<=gc; j++ ){
          for( int i=1; i<NI; i++ ){
            sendptr[_IDX_V3D(i-1,j-1,k-(NK-gc),l,NI-1,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
          }}}}
          break;

        case int(E_pYpZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
          for( int k=NK-gc; k<NK; k++ ){
          for( int j=NJ-gc; j<NJ; j++ ){
          for( int i=1; i<NI; i++ ){
            sendptr[_IDX_V3D(i-1,j-(NJ-gc),k-(NK-gc),l,NI-1,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
          }}}}
          break;
      }

      /* send
      if ( MPI_SUCCESS != MPI_Isend(sendptr,
                                    sz,
                                    dtype,
                                    comm_tbl[dir],
                                    tag,
                                    MPI_COMM_WORLD,
                                    &req[dir*2+1]) ) return false;
      */
      if ( !IsendData(sendptr,
                      sz,
                      comm_tbl[dir],
                      &req[dir*2+1]) ) return false;

      // pointer
      ptr += sz;
    }
  }

  //// Y edge ////
  for( int dir=int(E_mXmZ);dir<=int(E_pXpZ);dir++ )
  {
    if( comm_tbl[dir] >= 0 )
    {
      T *sendptr = &sendbuf[ptr];
      T *recvptr = &recvbuf[ptr];
      size_t sz = gc * (NJ-1) * gc * 3;

      /* recv
      if ( MPI_SUCCESS != MPI_Irecv(recvptr,
                                    sz,
                                    dtype,
                                    comm_tbl[dir],
                                    tag,
                                    MPI_COMM_WORLD,
                                    &req[dir*2]) ) return false;
      */
      if ( !IrecvData(recvptr,
                      sz,
                      comm_tbl[dir],
                      &req[dir*2]) ) return false;

      // pack
      switch(dir)
      {
        case int(E_mXmZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
          for( int k=1; k<=gc; k++ ){
          for( int j=1; j<NJ; j++ ){
          for( int i=1; i<=gc; i++ ){
            sendptr[_IDX_V3D(i-1,j-1,k-1,l,gc,NJ-1,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
          }}}}
          break;

        case int(E_pXmZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
          for( int k=1; k<=gc; k++ ){
          for( int j=1; j<NJ; j++ ){
          for( int i=NI-gc; i<NI; i++ ){
            sendptr[_IDX_V3D(i-(NI-gc),j-1,k-1,l,gc,NJ-1,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
          }}}}
          break;

        case int(E_mXpZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
          for( int k=NK-gc; k<NK; k++ ){
          for( int j=1; j<NJ; j++ ){
          for( int i=1; i<=gc; i++ ){
            sendptr[_IDX_V3D(i-1,j-1,k-(NK-gc),l,gc,NJ-1,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
          }}}}
          break;

        case int(E_pXpZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
          for( int k=NK-gc; k<NK; k++ ){
          for( int j=1; j<NJ; j++ ){
          for( int i=NI-gc; i<NI; i++ ){
            sendptr[_IDX_V3D(i-(NI-gc),j-1,k-(NK-gc),l,gc,NJ-1,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
          }}}}
          break;
      }

      /* send
      if ( MPI_SUCCESS != MPI_Isend(sendptr,
                                    sz,
                                    dtype,
                                    comm_tbl[dir],
                                    tag,
                                    MPI_COMM_WORLD,
                                    &req[dir*2+1]) ) return false;
      */
      if ( !IsendData(sendptr,
                      sz,
                      comm_tbl[dir],
                      &req[dir*2+1]) ) return false;

      // pointer
      ptr += sz;
    }
  }

  //// Z edge ////
  for( int dir=int(E_mXmY);dir<=int(E_pXpY);dir++ )
  {
    if( comm_tbl[dir] >= 0 )
    {
      T *sendptr = &sendbuf[ptr];
      T *recvptr = &recvbuf[ptr];
      size_t sz = gc * gc * (NK-1) * 3;

      /* recv
      if ( MPI_SUCCESS != MPI_Irecv(recvptr,
                                    sz,
                                    dtype,
                                    comm_tbl[dir],
                                    tag,
                                    MPI_COMM_WORLD,
                                    &req[dir*2]) ) return false;
      */
      if ( !IrecvData(recvptr,
                      sz,
                      comm_tbl[dir],
                      &req[dir*2]) ) return false;

      // pack
      switch(dir)
      {
        case int(E_mXmY):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
          for( int k=1; k<NK; k++ ){
          for( int j=1; j<=gc; j++ ){
          for( int i=1; i<=gc; i++ ){
            sendptr[_IDX_V3D(i-1,j-1,k-1,l,gc,gc,NK-1,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
          }}}}
          break;

        case int(E_pXmY):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
          for( int k=1; k<NK; k++ ){
          for( int j=1; j<=gc; j++ ){
          for( int i=NI-gc; i<NI; i++ ){
            sendptr[_IDX_V3D(i-(NI-gc),j-1,k-1,l,gc,gc,NK-1,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
          }}}}
          break;

        case int(E_mXpY):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
          for( int k=1; k<NK; k++ ){
          for( int j=NJ-gc; j<NJ; j++ ){
          for( int i=1; i<=gc; i++ ){
            sendptr[_IDX_V3D(i-1,j-(NJ-gc),k-1,l,gc,gc,NK-1,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
          }}}}
          break;

        case int(E_pXpY):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
          for( int k=1; k<NK; k++ ){
          for( int j=NJ-gc; j<NJ; j++ ){
          for( int i=NI-gc; i<NI; i++ ){
            sendptr[_IDX_V3D(i-(NI-gc),j-(NJ-gc),k-1,l,gc,gc,NK-1,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)];
          }}}}
          break;
      }

      /* send
      if ( MPI_SUCCESS != MPI_Isend(sendptr,
                                    sz,
                                    dtype,
                                    comm_tbl[dir],
                                    tag,
                                    MPI_COMM_WORLD,
                                    &req[dir*2+1]) ) return false;
      */
      if ( !IsendData(sendptr,
                      sz,
                      comm_tbl[dir],
                      &req[dir*2+1]) ) return false;

      // pointer
      ptr += sz;
    }
  }

  return true;
}


// #########################################################
/*
 * @brief unpack send data for diagonal edge
 * @param [out] array    dest array
 * @param [in]  gc       number of guide cell layer to be sent
 * @param [in]  recvbuf  recv buffer
 *
 * Must walk directions and advance `ptr` in exactly the same order as
 * pack_VEnode so each segment is read from the matching offset.
 */
template <class T>
void BrickComm::unpack_VEnode(T *array,
                              const int gc,
                              const T *recvbuf)
{
  int NI = size[0];
  int NJ = size[1];
  int NK = size[2];
  int VC = halo_width;
  size_t ptr = 0;

  //// X edge ////
  for( int dir=int(E_mYmZ);dir<=int(E_pYpZ);dir++ )
  {
    if( comm_tbl[dir] >= 0 )
    {
      const T *recvptr = &recvbuf[ptr];
      size_t sz = (NI-1) * gc * gc * 3;

      // unpack
      switch(dir)
      {
        case int(E_mYmZ):
#pragma omp parallel for collapse(4)
          for( int l=0; l<3; l++ ){
          for( int k=1-gc; k<=0; k++ ){
          for( int j=1-gc; j<=0; j++ ){
          for( int i=1; i<NI; i++ ){
array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-1,j-(1-gc),k-(1-gc),l,NI-1,gc,gc,0)]; } } } } break; case int(E_pYmZ): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=1-gc; k<=0; k++ ){ for( int j=NJ; j<NJ+gc; j++ ){ for( int i=1; i<NI; i++ ){ array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-1,j-(NJ),k-(1-gc),l,NI-1,gc,gc,0)]; } } } } break; case int(E_mYpZ): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=NK; k<NK+gc; k++ ){ for( int j=1-gc; j<=0; j++ ){ for( int i=1; i<NI; i++ ){ array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-1,j-(1-gc),k-(NK),l,NI-1,gc,gc,0)]; } } } } break; case int(E_pYpZ): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=NK; k<NK+gc; k++ ){ for( int j=NJ; j<NJ+gc; j++ ){ for( int i=1; i<NI; i++ ){ array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-1,j-(NJ),k-(NK),l,NI-1,gc,gc,0)]; } } } } break; } ptr += sz; } } //// Y edge //// for( int dir=int(E_mXmZ);dir<=int(E_pXpZ);dir++ ) { if( comm_tbl[dir] >= 0 ) { const T *recvptr = &recvbuf[ptr]; size_t sz = gc * (NJ-1) * gc * 3; // unpack switch(dir) { case int(E_mXmZ): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=1-gc; k<=0; k++ ){ for( int j=1; j<NJ; j++ ){ for( int i=1-gc; i<=0; i++ ){ array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(1-gc),j-1,k-(1-gc),l,gc,NJ-1,gc,0)]; } } } } break; case int(E_pXmZ): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=1-gc; k<=0; k++ ){ for( int j=1; j<NJ; j++ ){ for( int i=NI; i<NI+gc; i++ ){ array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(NI),j-1,k-(1-gc),l,gc,NJ-1,gc,0)]; } } } } break; case int(E_mXpZ): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=NK; k<NK+gc; k++ ){ for( int j=1; j<NJ; j++ ){ for( int i=1-gc; i<=0; i++ ){ array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(1-gc),j-1,k-(NK),l,gc,NJ-1,gc,0)]; } } } } break; case 
int(E_pXpZ): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=NK; k<NK+gc; k++ ){ for( int j=1; j<NJ; j++ ){ for( int i=NI; i<NI+gc; i++ ){ array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(NI),j-1,k-(NK),l,gc,NJ-1,gc,0)]; } } } } break; } ptr += sz; } } //// Z edge //// for( int dir=int(E_mXmY);dir<=int(E_pXpY);dir++ ) { if( comm_tbl[dir] >= 0 ) { const T *recvptr = &recvbuf[ptr]; size_t sz = gc * gc * (NK-1) * 3; // unpack switch(dir) { case int(E_mXmY): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=1; k<NK; k++ ){ for( int j=1-gc; j<=0; j++ ){ for( int i=1-gc; i<=0; i++ ){ array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(1-gc),j-(1-gc),k-1,l,gc,gc,NK-1,0)]; } } } } break; case int(E_pXmY): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=1; k<NK; k++ ){ for( int j=1-gc; j<=0; j++ ){ for( int i=NI; i<NI+gc; i++ ){ array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(NI),j-(1-gc),k-1,l,gc,gc,NK-1,0)]; } } } } break; case int(E_mXpY): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=1; k<NK; k++ ){ for( int j=NJ; j<NJ+gc; j++ ){ for( int i=1-gc; i<=0; i++ ){ array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(1-gc),j-(NJ),k-1,l,gc,gc,NK-1,0)]; } } } } break; case int(E_pXpY): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=1; k<NK; k++ ){ for( int j=NJ; j<NJ+gc; j++ ){ for( int i=NI; i<NI+gc; i++ ){ array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(NI),j-(NJ),k-1,l,gc,gc,NK-1,0)]; } } } } break; } ptr += sz; } } } // ######################################################### /* * @brief pack send data for diagonal corner * @param [in] array source array * @param [in] gc number of guide cell layer to be sent * @param [out] sendbuf send buffer * @param [out] recvbuf recv buffer * @param [out] req Array of MPI request * @retval true-success, false-fail */ template <class T> bool 
BrickComm::pack_VCnode(T *array, const int gc, T *sendbuf, T *recvbuf, MPI_Request *req) { int NI = size[0]; int NJ = size[1]; int NK = size[2]; int VC = halo_width; int tag = 0; size_t ptr = 0; //// 8 corner //// for( int dir=int(C_mXmYmZ);dir<=int(C_pXpYpZ);dir++ ) { if( comm_tbl[dir] >= 0 ) { T *sendptr = &sendbuf[ptr]; T *recvptr = &recvbuf[ptr]; size_t sz = gc * gc * gc * 3; /* recv if ( MPI_SUCCESS != MPI_Irecv(recvptr, sz, dtype, comm_tbl[dir], tag, MPI_COMM_WORLD, &req[dir*2]) ) return false; */ if ( !IrecvData(recvptr, sz, comm_tbl[dir], &req[dir*2]) ) return false; // pack switch(dir) { case int(C_mXmYmZ): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=1; k<=gc; k++ ){ for( int j=1; j<=gc; j++ ){ for( int i=1; i<=gc; i++ ){ sendptr[_IDX_V3D(i-1,j-1,k-1,l,gc,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)]; } } } } break; case int(C_pXmYmZ): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=1; k<=gc; k++ ){ for( int j=1; j<=gc; j++ ){ for( int i=NI-gc; i<NI; i++ ){ sendptr[_IDX_V3D(i-(NI-gc),j-1,k-1,l,gc,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)]; } } } } break; case int(C_mXpYmZ): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=1; k<=gc; k++ ){ for( int j=NJ-gc; j<NJ; j++ ){ for( int i=1; i<=gc; i++ ){ sendptr[_IDX_V3D(i-1,j-(NJ-gc),k-1,l,gc,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)]; } } } } break; case int(C_pXpYmZ): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=1; k<=gc; k++ ){ for( int j=NJ-gc; j<NJ; j++ ){ for( int i=NI-gc; i<NI; i++ ){ sendptr[_IDX_V3D(i-(NI-gc),j-(NJ-gc),k-1,l,gc,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)]; } } } } break; case int(C_mXmYpZ): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=NK-gc; k<NK; k++ ){ for( int j=1; j<=gc; j++ ){ for( int i=1; i<=gc; i++ ){ sendptr[_IDX_V3D(i-1,j-1,k-(NK-gc),l,gc,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)]; } } } } break; case int(C_pXmYpZ): 
#pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=NK-gc; k<NK; k++ ){ for( int j=1; j<=gc; j++ ){ for( int i=NI-gc; i<NI; i++ ){ sendptr[_IDX_V3D(i-(NI-gc),j-1,k-(NK-gc),l,gc,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)]; } } } } break; case int(C_mXpYpZ): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=NK-gc; k<NK; k++ ){ for( int j=NJ-gc; j<NJ; j++ ){ for( int i=1; i<=gc; i++ ){ sendptr[_IDX_V3D(i-1,j-(NJ-gc),k-(NK-gc),l,gc,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)]; } } } } break; case int(C_pXpYpZ): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=NK-gc; k<NK; k++ ){ for( int j=NJ-gc; j<NJ; j++ ){ for( int i=NI-gc; i<NI; i++ ){ sendptr[_IDX_V3D(i-(NI-gc),j-(NJ-gc),k-(NK-gc),l,gc,gc,gc,0)] = array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)]; } } } } break; } /* send if ( MPI_SUCCESS != MPI_Isend(sendptr, sz, dtype, comm_tbl[dir], tag, MPI_COMM_WORLD, &req[dir*2+1]) ) return false; */ if ( !IsendData(sendptr, sz, comm_tbl[dir], &req[dir*2+1]) ) return false; // pointer ptr += sz; } } return true; } // ######################################################### /* * @brief unpack send data for diagonal corner * @param [out] array dest array * @param [in] gc number of guide cell layer to be sent * @param [in] recvbuf recv buffer */ template <class T> void BrickComm::unpack_VCnode(T *array, const int gc, const T *recvbuf) { int NI = size[0]; int NJ = size[1]; int NK = size[2]; int VC = halo_width; size_t ptr = 0; //// 8 corner //// for( int dir=int(C_mXmYmZ);dir<=int(C_pXpYpZ);dir++ ) { if( comm_tbl[dir] >= 0 ) { const T *recvptr = &recvbuf[ptr]; size_t sz = gc * gc * gc * 3; // unpack switch(dir) { case int(C_mXmYmZ): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=1-gc; k<=0; k++ ){ for( int j=1-gc; j<=0; j++ ){ for( int i=1-gc; i<=0; i++ ){ array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(1-gc),j-(1-gc),k-(1-gc),l,gc,gc,gc,0)]; } } } } break; case 
int(C_pXmYmZ): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=1-gc; k<=0; k++ ){ for( int j=1-gc; j<=0; j++ ){ for( int i=NI; i<NI+gc; i++ ){ array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(NI),j-(1-gc),k-(1-gc),l,gc,gc,gc,0)]; } } } } break; case int(C_mXpYmZ): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=1-gc; k<=0; k++ ){ for( int j=NJ; j<NJ+gc; j++ ){ for( int i=1-gc; i<=0; i++ ){ array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(1-gc),j-(NJ),k-(1-gc),l,gc,gc,gc,0)]; } } } } break; case int(C_pXpYmZ): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=1-gc; k<=0; k++ ){ for( int j=NJ; j<NJ+gc; j++ ){ for( int i=NI; i<NI+gc; i++ ){ array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(NI),j-(NJ),k-(1-gc),l,gc,gc,gc,0)]; } } } } break; case int(C_mXmYpZ): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=NK; k<NK+gc; k++ ){ for( int j=1-gc; j<=0; j++ ){ for( int i=1-gc; i<=0; i++ ){ array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(1-gc),j-(1-gc),k-(NK),l,gc,gc,gc,0)]; } } } } break; case int(C_pXmYpZ): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=NK; k<NK+gc; k++ ){ for( int j=1-gc; j<=0; j++ ){ for( int i=NI; i<NI+gc; i++ ){ array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(NI),j-(1-gc),k-(NK),l,gc,gc,gc,0)]; } } } } break; case int(C_mXpYpZ): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=NK; k<NK+gc; k++ ){ for( int j=NJ; j<NJ+gc; j++ ){ for( int i=1-gc; i<=0; i++ ){ array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(1-gc),j-(NJ),k-(NK),l,gc,gc,gc,0)]; } } } } break; case int(C_pXpYpZ): #pragma omp parallel for collapse(4) for( int l=0; l<3; l++ ){ for( int k=NK; k<NK+gc; k++ ){ for( int j=NJ; j<NJ+gc; j++ ){ for( int i=NI; i<NI+gc; i++ ){ array[_IDX_V3D(i,j,k,l,NI,NJ,NK,VC)] = recvptr[_IDX_V3D(i-(NI),j-(NJ),k-(NK),l,gc,gc,gc,0)]; } } } } break; } ptr 
+= sz; } } } #endif // _DIAGONAL_COMM #endif // _CB_PACK_V_NODE_H_
bugged1.c
/******************************************************************************
 * TASK: bugged1.c
 * DESCRIPTION:
 *   Demonstrates the 'parallel for' work-sharing construct.  The original
 *   (commented-out) version did not compile: '#pragma omp parallel for' must
 *   be followed directly by a for statement, not by a '{ }' block.  The fix
 *   splits it into a 'parallel' region that contains an 'omp for' loop.
 ******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

#define N 50
#define CHUNKSIZE 5

int main(int argc, char **argv) {
  float a[N], b[N], c[N];

  /* Fill both input vectors with 0, 1, 2, ... */
  for (int idx = 0; idx < N; idx++)
    a[idx] = b[idx] = idx * 1.0;

  const int chunk = CHUNKSIZE;

  /* Each thread adds its statically-scheduled chunks and reports its id. */
#pragma omp parallel shared(a, b, c, chunk)
  {
    const int tid = omp_get_thread_num();

#pragma omp for schedule(static, chunk)
    for (int idx = 0; idx < N; idx++) {
      c[idx] = a[idx] + b[idx];
      printf("tid= %d i= %d c[i]= %f\n", tid, idx, c[idx]);
    }
  }

  return 0;
}
mandel_omp_x_dynamic_256.c
/* Sequential Mandlebrot program */
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include <time.h>

#define X_RESN 1000 /* x resolution */
#define Y_RESN 1000 /* y resolution */
#define MAX_ITER (2000)
#define CHUNK 256

/* ref: https://stackoverflow.com/questions/6749621/how-to-create-a-high-resolution-timer-in-linux-to-measure-program-performance */
/* Start a nanosecond-resolution monotonic timer. */
struct timespec timer_start() {
  struct timespec start_time;
  clock_gettime(CLOCK_MONOTONIC, &start_time);
  return start_time;
}

/* End a timer, returning nanoseconds elapsed since start_time as a long. */
long timer_end(struct timespec start_time) {
  struct timespec end_time;
  clock_gettime(CLOCK_MONOTONIC, &end_time);
  long diffInNanos = (end_time.tv_sec - start_time.tv_sec) * (long)1e9 +
                     (end_time.tv_nsec - start_time.tv_nsec);
  return diffInNanos;
}

/* Complex number used by the Mandelbrot iteration. */
typedef struct complextype {
  double real, imag;
} Compl;

/* Color conversion functions
 * ref: https://stackoverflow.com/questions/3018313/algorithm-to-convert-rgb-to-hsv-and-hsv-to-rgb-in-range-0-255-for-both */
typedef struct {
  double r; /* a fraction between 0 and 1 */
  double g; /* a fraction between 0 and 1 */
  double b; /* a fraction between 0 and 1 */
} rgb;

typedef struct {
  double h; /* angle in degrees */
  double s; /* a fraction between 0 and 1 */
  double v; /* a fraction between 0 and 1 */
} hsv;

static hsv rgb2hsv(rgb in);
static rgb hsv2rgb(hsv in);

/* Convert an RGB color (fractions in [0,1]) to HSV. */
hsv rgb2hsv(rgb in) {
  hsv out;
  double min, max, delta;

  min = in.r < in.g ? in.r : in.g;
  min = min < in.b ? min : in.b;
  max = in.r > in.g ? in.r : in.g;
  max = max > in.b ? max : in.b;

  out.v = max; /* v */
  delta = max - min;
  if (delta < 0.00001) {
    out.s = 0;
    out.h = 0; /* undefined, maybe nan? */
    return out;
  }
  if (max > 0.0) { /* NOTE: if Max is == 0, this divide would cause a crash */
    out.s = (delta / max); /* s */
  } else {
    /* if max is 0, then r = g = b = 0: s = 0, h is undefined */
    out.s = 0.0;
    out.h = NAN; /* its now undefined */
    return out;
  }
  if (in.r >= max)                       /* > is bogus, just keeps compiler happy */
    out.h = (in.g - in.b) / delta;       /* between yellow & magenta */
  else if (in.g >= max)
    out.h = 2.0 + (in.b - in.r) / delta; /* between cyan & yellow */
  else
    out.h = 4.0 + (in.r - in.g) / delta; /* between magenta & cyan */

  out.h *= 60.0; /* degrees */
  if (out.h < 0.0)
    out.h += 360.0;
  return out;
}

/* Convert an HSV color back to RGB fractions in [0,1]. */
rgb hsv2rgb(hsv in) {
  double hh, p, q, t, ff;
  long i;
  rgb out;

  if (in.s <= 0.0) { /* < is bogus, just shuts up warnings */
    out.r = in.v;
    out.g = in.v;
    out.b = in.v;
    return out;
  }
  hh = in.h;
  if (hh >= 360.0) hh = 0.0;
  hh /= 60.0;
  i = (long)hh;       /* sextant index 0..5 */
  ff = hh - i;        /* fractional part within the sextant */
  p = in.v * (1.0 - in.s);
  q = in.v * (1.0 - (in.s * ff));
  t = in.v * (1.0 - (in.s * (1.0 - ff)));

  switch (i) {
  case 0:
    out.r = in.v; out.g = t; out.b = p;
    break;
  case 1:
    out.r = q; out.g = in.v; out.b = p;
    break;
  case 2:
    out.r = p; out.g = in.v; out.b = t;
    break;
  case 3:
    out.r = p; out.g = q; out.b = in.v;
    break;
  case 4:
    out.r = t; out.g = p; out.b = in.v;
    break;
  case 5:
  default:
    out.r = in.v; out.g = p; out.b = q;
    break;
  }
  return out;
}

/* Map a value t from the interval [s0,e0] to the interval [s1,e1]. */
double map(double t, double s0, double e0, double s1, double e1) {
  return (t - s0) / (e0 - s0) * (e1 - s1) + s1;
}

/* Color for an escaped pixel (t in [0,1] = iteration fraction).
 * BUG FIX: removed the unused locals 'double u, v;' (dead code, -Wunused). */
rgb colormap1(double t) {
  hsv c;
  c.h = fmod(fmod(t * 1000, 360.0) + 360.0, 360.0);
  c.s = 1.0;
  c.v = 0.5;
  return hsv2rgb(c);
}

/* Color for an interior pixel, driven by the orbit length t.
 * BUG FIX: removed the unused locals 'double u, v;' (dead code, -Wunused). */
rgb colormap2(double t) {
  hsv c;
  c.h = map(sin(t * 10), -1, 1, 0 + 150, 60 + 150);
  c.s = 1.0;
  c.v = map(sin(t * 1), -1, 1, 0, 1);
  return hsv2rgb(c);
}

/* Convert a [0,1] fraction to a clamped 8-bit channel value. */
int dtoi(double d) {
  int i = d * 256;
  if (i < 0) i = 0;
  if (i > 255) i = 255;
  return i;
}

/* Pack an rgb color into a 0x00RRGGBB pixel value. */
unsigned long _RGB(rgb c) {
  return dtoi(c.b) + (dtoi(c.g) << 8) + (dtoi(c.r) << 16);
}

/* return type of main(); its definition continues below */
int
main(int argc, char *argv[]) { Window win; /* initialization for a window */ GC gc; Display *display; // criando a janela { unsigned int width, height, /* window size */ x, y, /* window position */ border_width, /*border width in pixels */ display_width, display_height, /* size of screen */ screen; /* which screen */ char *window_name = "Mandelbrot Set", *display_name = NULL; unsigned long valuemask = 0; XGCValues values; XSizeHints size_hints; Pixmap bitmap; XPoint points[800]; FILE *fp, *fopen(); char str[100]; XSetWindowAttributes attr[1]; /* connect to Xserver */ if ((display = XOpenDisplay(display_name)) == NULL) { fprintf(stderr, "drawon: cannot connect to X server %s\n", XDisplayName(display_name)); exit(-1); } /* get screen size */ screen = DefaultScreen(display); display_width = DisplayWidth(display, screen); display_height = DisplayHeight(display, screen); /* set window size */ width = X_RESN; height = Y_RESN; /* set window position */ x = 0; y = 0; /* create opaque window */ border_width = 4; win = XCreateSimpleWindow(display, RootWindow(display, screen), x, y, width, height, border_width, BlackPixel(display, screen), WhitePixel(display, screen)); XSelectInput(display, win, StructureNotifyMask); size_hints.flags = USPosition | USSize; size_hints.x = x; size_hints.y = y; size_hints.width = width; size_hints.height = height; size_hints.min_width = 300; size_hints.min_height = 300; XSetNormalHints(display, win, &size_hints); XStoreName(display, win, window_name); /* create graphics context */ attr[0].backing_store = Always; attr[0].backing_planes = 1; attr[0].backing_pixel = BlackPixel(display, screen); XChangeWindowAttributes(display, win, CWBackingStore | CWBackingPlanes | CWBackingPixel, attr); XMapWindow(display, win); gc = XCreateGC(display, win, valuemask, &values); XSetBackground(display, gc, WhitePixel(display, screen)); XSetForeground(display, gc, BlackPixel(display, screen)); XSetLineAttributes(display, gc, 1, LineSolid, CapRound, JoinRound); // 
bug fix: must wait for the Map event to start drawing // otherwise, not all pixels will be drawn to screen for (;;) { XEvent e; XNextEvent(display, &e); if (e.type == MapNotify) break; } XSync(display, 0); } struct timespec vartime = timer_start(); /* Mandlebrot variables */ int *ks; ks = (int *)malloc((X_RESN*Y_RESN) * sizeof(int)); double *ds; ds = (double *)malloc((X_RESN*Y_RESN) * sizeof(double)); /* Calculate and draw points */ #pragma omp parallel default(shared) { int num_threads = omp_get_num_threads(); // printf("num_threads = %d\n", num_threads); #pragma omp for schedule(dynamic, CHUNK) for (int it = 0; it < X_RESN*Y_RESN; it++) { int i = it / Y_RESN; int j = it % Y_RESN; // mandelbrot set is defined in the region of x = [-2, +2] and y = [-2, +2] double u = ((double)i - (X_RESN / 2.0)) / (X_RESN / 4.0); double v = ((double)j - (Y_RESN / 2.0)) / (Y_RESN / 4.0); Compl z, c, t; z.real = z.imag = 0.0; c.real = v; c.imag = u; int k = 0; double d = 0.0; double lengthsq, temp; do { /* iterate for pixel color */ t = z; z.imag = 2.0 * t.real * t.imag + c.imag; z.real = t.real * t.real - t.imag * t.imag + c.real; lengthsq = z.real * z.real + z.imag * z.imag; d += pow(pow(z.imag - t.imag, 2.0) + pow(z.real - t.real, 2.0), 0.5); k++; } while (lengthsq < 4.0 && k < MAX_ITER); ks[it] = k; ds[it] = d; } } { int i, j, k; double d; for (int it_pixel = 0; it_pixel < (X_RESN * Y_RESN); it_pixel++) { int i = it_pixel / Y_RESN; int j = it_pixel % Y_RESN; k = ks[it_pixel]; d = ds[it_pixel]; // if (k == MAX_ITER) { rgb c; c.r = 1.0; c.g = 0.8; c.b = 0; XSetForeground(display, gc, k == MAX_ITER ? 
_RGB(colormap2(sin(d))) : _RGB(colormap1(k/(double)MAX_ITER))); // XSetForeground(display, gc, _RGB(colormap1(d))); // XSetForeground(display, gc, _RGB(c)); // XSetForeground(display, gc, 0xFFD000); XDrawPoint(display, win, gc, j, i); } } XFlush(display); free(ks); free(ds); long time_elapsed_nanos = timer_end(vartime); double elapsed = time_elapsed_nanos*0.000000001; printf("%lf\n", elapsed); sleep(30); } /* Program Finished */ return 0; }
fox_floats_timer_caching_omp_fileIO_benchmark.c
/* fox_floats_timer_caching_omp_fileIO_benchmark.c -- uses Fox's algorithm to multiply two square matrices * * Implementation of parallel matrix multiplication: * LaTeX: $C_{i,j} = \sum_{k} A_{i,k}B_{k,j}$ * * Input: * Input Matrix file name: A.dat, B.dat * * Output: * Output Matrix file name: C.dat * Output Sub-matrices file name: SubMatrices.dat * * Notes: * 1. Assumes the number of processes is a perfect square * 2. The array member of the matrices is statically allocated * * See Chap 7, pp. 113 & ff and pp. 125 & ff in PPMPI */ /* Compiler command: * mpiicc -O3 -qopenmp -qopt-report-phase=vec -qopt-report=3 fox_floats_timer_caching_omp_fileIO_benchmark.c * -o fox_floats_timer_caching_omp_fileIO_benchmark * * Run command: * mpirun -n -4 ./fox_floats_timer_caching_omp */ /* Head files */ #include <stdio.h> #include <math.h> #include <stdlib.h> #include <mpi.h> #include <omp.h> // define problem scale, matrix row/col size #define PROBLEM_SCALE 512 // define whether or not Print Matices in the Command Line #define PRINT_A 0 #define PRINT_B 0 #define PRINT_C 0 #define PRINT_LOCAL_A 0 #define PRINT_LOCAL_B 0 #define PRINT_LOCAL_C 0 // define float precision, 4 byte single-precision float or 8 byte double-precision float #define FLOAT double #define FLOAT_MPI MPI_DOUBLE // Define threads speed-up affnity in the computing #define NUM_THREADS 2 // Define threads affinity "scatter" or "compact" #define AFFINITY "KMP_AFFINITY = compact" /* Type define structure of process grid */ typedef struct { int p; /* Total number of processes */ MPI_Comm comm; /* Communicator for entire grid */ MPI_Comm row_comm; /* Communicator for my row */ MPI_Comm col_comm; /* Communicator for my col */ int q; /* Order of grid */ int my_row; /* My row number */ int my_col; /* My column number */ int my_rank; /* My rank in the grid comm */ } GRID_INFO_T; /* Type define structure of local matrix */ #define MAX 2097152 // Maximum number of elements in the array that store the local matrix (2^21) 
typedef struct { int n_bar; #define Order(A) ((A)->n_bar) // defination with parameters FLOAT entries[MAX]; #define Entry(A,i,j) (*(((A)->entries) + ((A)->n_bar)*(i) + (j))) // defination with parameters, Array dereference } LOCAL_MATRIX_T; /* Function Declarations */ LOCAL_MATRIX_T* Local_matrix_allocate(int n_bar); void Free_local_matrix(LOCAL_MATRIX_T** local_A); void Read_matrix_A(char* prompt, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid, int n); // Read matrix A from a file void Read_matrix_B(char* prompt, LOCAL_MATRIX_T* local_B, // for continuous memory access, local A(i,k)*B(k,j) = A(i,k)*B^{T}(j,k) GRID_INFO_T* grid, int n); // Read matrix B from a file void Print_matrix_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid, int n); // Print matrix A in the command line void Print_matrix_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid, int n); // Print matrix B in the command line void Print_matrix_C(char* title, LOCAL_MATRIX_T* local_C, GRID_INFO_T* grid, int n); // Print matrix C in the command line void Set_to_zero(LOCAL_MATRIX_T* local_A); void Local_matrix_multiply(LOCAL_MATRIX_T* local_A, LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C); void Build_matrix_type(LOCAL_MATRIX_T* local_A); MPI_Datatype local_matrix_mpi_t; LOCAL_MATRIX_T* temp_mat; // global LOCAL_MATRIX_T* type pointer void Print_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); void Print_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid); void Print_local_matrices_C(char* title, LOCAL_MATRIX_T* local_B, GRID_INFO_T* grid); void Write_matrix_C(char* title, LOCAL_MATRIX_T* local_C, GRID_INFO_T* grid, int n); // Write matrix multiplication to a file void Write_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); // Write local matrix A to a file void Write_local_matrices_B(char* title, 
LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid); // Write local matrix B to a file void Write_local_matrices_C(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); // Write local matrix C to a file /*********************************************************/ main(int argc, char* argv[]) { FILE *fp; int p; int my_rank; GRID_INFO_T grid; LOCAL_MATRIX_T* local_A; LOCAL_MATRIX_T* local_B; LOCAL_MATRIX_T* local_C; int n; int n_bar; double timer_start; double timer_end; int content; int i; int j; void Setup_grid(GRID_INFO_T* grid); void Fox(int n, GRID_INFO_T* grid, LOCAL_MATRIX_T* local_A, LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C); // Matrix Generator fp = fopen("A.dat", "w"); // Generate and print matrix A into a file for (i = 0; i < PROBLEM_SCALE; i++) { for (j = 0; j < PROBLEM_SCALE; j++) if(i == j){ fprintf(fp,"%d ", 1); } else { fprintf(fp,"%d ", 0); } fprintf(fp,"\n"); } fclose(fp); fp = fopen("B.dat", "w"); // Generate and print matrix B into a file for (i = 0; i < PROBLEM_SCALE; i++){ for (j = 0; j < PROBLEM_SCALE; j++) fprintf(fp,"%d ", (i*PROBLEM_SCALE)+j); fprintf(fp, "\n"); } fclose(fp); // SPMD Mode start from here (Processess fork from here) MPI_Init(&argc, &argv); // MPI initializing MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator // Initial OpenMP Environment omp_set_num_threads(NUM_THREADS); kmp_set_defaults(AFFINITY); Setup_grid(&grid); // Set up Processess grid if (my_rank == 0) { fp = fopen("A.dat","r"); n = 0; while((content = fgetc(fp)) != EOF) { //printf("fgetc = %d\n", content); if(content != 0x20 && content != 0x0A) n++; } fclose(fp); n = (int) sqrt((double) n); printf("We read the order of the matrices from A.dat is\n %d\n", n); // while(fgetc(fp) != EOF) n++; // printf("What's the order of the matrices?\n"); // scanf("%d", &n); // Overall Matrix's Order } MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD); // MPI broadcast the overall matrix's order 
n_bar = n/grid.q; // \bar n is the local matrix's order local_A = Local_matrix_allocate(n_bar); // Allocate local matrix A Order(local_A) = n_bar; // Local matrix A's order Read_matrix_A("Read A from A.dat", local_A, &grid, n); // Read local matrices A from process 0 by using stdin, and send them to each process (Procedure) if (PRINT_A == 1) Print_matrix_A("We read A =", local_A, &grid, n);// Print local matrices A from process 0 by using stdout, and send them to each process (Procedure) local_B = Local_matrix_allocate(n_bar); // Allocate local matrix Order(local_B) = n_bar; // Local matrix B's order Read_matrix_B("Read B from B.dat", local_B, &grid, n); // Read local matrix B as it's local transpose from process 0 by using stdin, and send them to each process (Procedure) if (PRINT_B == 1) Print_matrix_B("We read B =", local_B, &grid, n);// Print local matrix B as it's local transpose from process 0 by using stdout, and send them to each process (Procedure) Build_matrix_type(local_A); // Buid local_A's MPI matrix data type temp_mat = Local_matrix_allocate(n_bar); // Allocate temporary matrix of order n $\time$ n local_C = Local_matrix_allocate(n_bar); // Allocate matrix local_C Order(local_C) = n_bar; // Set matrix local_C's order MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier timer_start = MPI_Wtime(); // Get the MPI wall time Fox(n, &grid, local_A, local_B, local_C); // FOX parallel matrix multiplication Algorithm implement function timer_end = MPI_Wtime(); // Get the MPI wall time MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier Write_matrix_C("Write C into the C.dat", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result) if (PRINT_C == 1) Print_matrix_C("The product is", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result) Write_local_matrices_A("Write split of local matrix A into local_A.dat", local_A, &grid); // Write local matrix A into file if (PRINT_LOCAL_A == 1) 
Print_local_matrices_A("Split of local matrix A", local_A, &grid); // Print matrix A split in processess Write_local_matrices_B("Write split of local matrix B into local_B.dat", local_B, &grid); // Write local matrix B into file, special for row-major storage if (PRINT_LOCAL_B == 1) Print_local_matrices_B("Split of local matrix B", local_B, &grid); // Print matrix B split in processess, special for row-major storage Write_local_matrices_C("Write split of local matrix C into local_C.dat", local_C, &grid); // Print matrix C split in processess if (PRINT_LOCAL_C == 1) Print_local_matrices_C("Split of local matrix C", local_C, &grid); // Print matrix C split in processess Free_local_matrix(&local_A); // Free local matrix local_A Free_local_matrix(&local_B); // Free local matrix local_B Free_local_matrix(&local_C); // Free local matrix local_C if(my_rank == 0) printf("Parallel Fox Matrix Multiplication Elapsed time:\n %30.20E seconds\n", timer_end-timer_start); MPI_Finalize(); // MPI finalize, processes join and resource recycle } /* main */ /*********************************************************/ void Setup_grid( GRID_INFO_T* grid /* out */) { int old_rank; int dimensions[2]; int wrap_around[2]; int coordinates[2]; int free_coords[2]; /* Set up Global Grid Information */ MPI_Comm_size(MPI_COMM_WORLD, &(grid->p)); MPI_Comm_rank(MPI_COMM_WORLD, &old_rank); /* We assume p is a perfect square */ // but what if it's not a perfect square grid->q = (int) sqrt((double) grid->p); dimensions[0] = dimensions[1] = grid->q; /* We want a circular shift in second dimension. 
*/ /* Don't care about first */ wrap_around[0] = wrap_around[1] = 1; MPI_Cart_create(MPI_COMM_WORLD, 2, dimensions, wrap_around, 1, &(grid->comm)); MPI_Comm_rank(grid->comm, &(grid->my_rank)); MPI_Cart_coords(grid->comm, grid->my_rank, 2, coordinates); grid->my_row = coordinates[0]; grid->my_col = coordinates[1]; /* Set up row communicators */ free_coords[0] = 0; free_coords[1] = 1; MPI_Cart_sub(grid->comm, free_coords, &(grid->row_comm)); /* Set up column communicators */ free_coords[0] = 1; free_coords[1] = 0; MPI_Cart_sub(grid->comm, free_coords, &(grid->col_comm)); } /* Setup_grid */ /*********************************************************/ void Fox( int n /* in */, GRID_INFO_T* grid /* in */, LOCAL_MATRIX_T* local_A /* in */, LOCAL_MATRIX_T* local_B /* in */, LOCAL_MATRIX_T* local_C /* out */) { LOCAL_MATRIX_T* temp_A; /* Storage for the sub- */ /* matrix of A used during */ /* the current stage */ int stage; int bcast_root; int n_bar; /* n/sqrt(p) */ int source; int dest; MPI_Status status; n_bar = n/grid->q; Set_to_zero(local_C); /* Calculate addresses for row circular shift of B */ source = (grid->my_row + 1) % grid->q; dest = (grid->my_row + grid->q - 1) % grid->q; /* Set aside storage for the broadcast block of A */ temp_A = Local_matrix_allocate(n_bar); for (stage = 0; stage < grid->q; stage++) { bcast_root = (grid->my_row + stage) % grid->q; if (bcast_root == grid->my_col) { // Process P_{ii} broadcast A_{ii} in process gird's row commnunicator MPI_Bcast(local_A, 1, local_matrix_mpi_t, bcast_root, grid->row_comm); Local_matrix_multiply(local_A, local_B, local_C); } else { // temp_A is a buffer for process P_{ij} to store A_{ij} MPI_Bcast(temp_A, 1, local_matrix_mpi_t, bcast_root, grid->row_comm); Local_matrix_multiply(temp_A, local_B, local_C); } MPI_Sendrecv_replace(local_B, 1, local_matrix_mpi_t, // MPI send and receive with single buffer dest, 0, source, 0, grid->col_comm, &status); // Circular shift of process grid B's row, after local 
multiplication operation } /* for */ } /* Fox */ /*********************************************************/ LOCAL_MATRIX_T* Local_matrix_allocate(int local_order) { LOCAL_MATRIX_T* temp; temp = (LOCAL_MATRIX_T*) malloc(sizeof(LOCAL_MATRIX_T)); return temp; } /* Local_matrix_allocate */ /*********************************************************/ void Free_local_matrix( LOCAL_MATRIX_T** local_A_ptr /* in/out */) { free(*local_A_ptr); } /* Free_local_matrix */ /*********************************************************/ /* Read and distribute matrix for matrix A: * foreach global row of the matrix, * foreach grid column * read a block of n_bar floats on process 0 * and send them to the appropriate process. */ void Read_matrix_A( char* prompt /* in */, LOCAL_MATRIX_T* local_A /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int dest; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { // Process 0 read matrix input from stdin and send them to other processess fp = fopen("A.dat","r"); temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT)); printf("%s\n", prompt); fflush(stdout); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_A); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &dest); if (dest == 0) { for (mat_col = 0; mat_col < Order(local_A); mat_col++) fscanf(fp, "%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); /* scanf("%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); */ } else { for(mat_col = 0; mat_col < Order(local_A); mat_col++) fscanf(fp,"%lf", temp + mat_col); // scanf("%lf", temp + mat_col); MPI_Send(temp, Order(local_A), FLOAT_MPI, dest, 0, grid->comm); } } } free(temp); fclose(fp); } else { // Other processess receive matrix from process 0 for (mat_row = 0; mat_row < Order(local_A); mat_row++) MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A), 
FLOAT_MPI, 0, 0, grid->comm, &status); } } /* Read_matrix */ /*********************************************************/ /* Read and distribute matrix for local matrix B's transpose: * foreach global row of the matrix, * foreach grid column * read a block of n_bar floats on process 0 * and send them to the appropriate process. */ void Read_matrix_B( char* prompt /* in */, LOCAL_MATRIX_T* local_B /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int dest; int coords[2]; FLOAT *temp; MPI_Status status; if (grid->my_rank == 0) { // Process 0 read matrix input from stdin and send them to other processess fp = fopen("B.dat","r"); temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); printf("%s\n", prompt); fflush(stdout); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_B); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &dest); if (dest == 0) { // process 0 (local) for (mat_col = 0; mat_col < Order(local_B); mat_col++) fscanf(fp, "%lf", (local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage /* scanf("%lf", (local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage */ /* scanf("%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); */ } else { for(mat_col = 0; mat_col < Order(local_B); mat_col++) fscanf(fp, "%lf", temp + mat_col); // scanf("%lf", temp + mat_col); MPI_Send(temp, Order(local_B), FLOAT_MPI, dest, 0, grid->comm); } } } free(temp); fclose(fp); } else { // Other processess receive matrix from process 0 temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); // switch rows and colums in local_B, for column major storage for (mat_col = 0; mat_col < Order(local_B); mat_col++) { MPI_Recv(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm, &status); // switch rows and colums in 
local_B, for column major storage for(mat_row = 0; mat_row < Order(local_B); mat_row++) Entry(local_B, mat_row, mat_col) = *(temp + mat_row); // switch rows and colums in local_B, for column major storage /* MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI, 0, 0, grid->comm, &status); */ } free(temp); } } /* Read_matrix_B */ /*********************************************************/ /* Recive and Print Matrix A: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Print_matrix_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_A); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_A); mat_col++) printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_A), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_A); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { for (mat_row = 0; mat_row < Order(local_A); mat_row++) MPI_Send(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI, 0, 0, grid->comm); } } /* Print_matrix_A */ /*********************************************************/ /* Recive and Print Matrix for local matrix B's transpose: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and 
print them */ void Print_matrix_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_B); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_B); mat_col++) printf("%20.15E ", Entry(local_B, mat_col, mat_row)); // switch rows and colums in local_B, for column major storage // printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_B), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_B); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); for (mat_col = 0; mat_col < Order(local_B); mat_col++) { for(mat_row = 0; mat_row < Order(local_B); mat_row++) *(temp+mat_row) = Entry(local_B, mat_row, mat_col); // switch rows and colums in local_B, for column major storage MPI_Send(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm); } free(temp); } } /* Print_matrix_B */ /*********************************************************/ /* Recive and Print Matrix A: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Print_matrix_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) 
malloc(Order(local_C)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_C); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_C); mat_col++) printf("%20.15E ", Entry(local_C, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_C); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { for (mat_row = 0; mat_row < Order(local_C); mat_row++) MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C), FLOAT_MPI, 0, 0, grid->comm); } } /* Print_matrix_C */ /*********************************************************/ /* Recive and Write Matrix C into a file: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Write_matrix_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { fp = fopen("C.dat", "w+"); temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_C); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_C); mat_col++) fprintf(fp, "%20.15E ", Entry(local_C, mat_row, mat_col)); // printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_C); 
mat_col++) fprintf(fp, "%20.15E ", temp[mat_col]); // printf("%20.15E ", temp[mat_col]); } } fprintf(fp,"\n"); } free(temp); fclose(fp); } else { for (mat_row = 0; mat_row < Order(local_C); mat_row++) MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C), FLOAT_MPI, 0, 0, grid->comm); } } /* Write_matrix_C */ /*********************************************************/ /* * Set local matrix's element to zero */ void Set_to_zero( LOCAL_MATRIX_T* local_A /* out */) { int i, j; for (i = 0; i < Order(local_A); i++) for (j = 0; j < Order(local_A); j++) Entry(local_A,i,j) = 0.0E0; } /* Set_to_zero */ /*********************************************************/ void Build_matrix_type( LOCAL_MATRIX_T* local_A /* in */) { MPI_Datatype temp_mpi_t; int block_lengths[2]; MPI_Aint displacements[2]; MPI_Datatype typelist[2]; MPI_Aint start_address; MPI_Aint address; MPI_Type_contiguous(Order(local_A)*Order(local_A), FLOAT_MPI, &temp_mpi_t); // Creates a contiguous datatype /* Synopsis int MPI_Type_contiguous(int count, MPI_Datatype oldtype, MPI_Datatype *newtype) Input Parameters count replication count (nonnegative integer) oldtype old datatype (handle) */ block_lengths[0] = block_lengths[1] = 1; typelist[0] = MPI_INT; typelist[1] = temp_mpi_t; MPI_Address(local_A, &start_address); // Gets the address of a location in caller's memory MPI_Address(&(local_A->n_bar), &address); /* Synopsis int MPI_Address(const void *location, MPI_Aint *address) Input Parameters location location in caller memory (choice) Output Parameters address address of location (address integer) */ displacements[0] = address - start_address; MPI_Address(local_A->entries, &address); displacements[1] = address - start_address; MPI_Type_struct(2, block_lengths, displacements, typelist, &local_matrix_mpi_t); // Creates a struct datatype /* Synopsis int MPI_Type_struct(int count, const int *array_of_blocklengths, const MPI_Aint *array_of_displacements, const MPI_Datatype *array_of_types, MPI_Datatype *newtype) Input 
Parameters count number of blocks (integer) -- also number of entries in arrays array_of_types , array_of_displacements and array_of_blocklengths array_of_blocklengths number of elements in each block (array) array_of_displacements byte displacement of each block (array) array_of_types type of elements in each block (array of handles to datatype objects) Output Parameters newtype new datatype (handle) */ MPI_Type_commit(&local_matrix_mpi_t); // Commits the datatype /* Synopsis int MPI_Type_commit(MPI_Datatype *datatype) Input Parameters datatype datatype (handle) */ } /* Build_matrix_type */ /*********************************************************/ /* local matrix multiplication function * withing OpenMP Thread Acceleration */ void Local_matrix_multiply( LOCAL_MATRIX_T* local_A /* in */, LOCAL_MATRIX_T* local_B /* in */, LOCAL_MATRIX_T* local_C /* out */) { int i, j, k; // int my_rank; // MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) // Threads acceleration upgrade, parallel task split for (i = 0; i < Order(local_A); i++) { // printf("Current in the Fox Kernel:\n my process id is %d, my thread id is %d\n",my_rank,omp_get_thread_num()); for (j = 0; j < Order(local_A); j++) for (k = 0; k < Order(local_B); k++) Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) /* Entry(local_C,i,j) = Entry(local_C,i,j) + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper */ } } /* Local_matrix_multiply */ /*********************************************************/ /* Recive and Print Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local 
matrix local_A from other processess */ void Print_local_matrices_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_A); i++) { for (j = 0; j < Order(local_A); j++) printf("%20.15E ", Entry(local_A,i,j)); printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,i,j)); printf("\n"); } } fflush(stdout); } else { MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_A */ /*********************************************************/ /* Recive and Print Local Matrix for local matrix B's transpose: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Print_local_matrices_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_B); i++) { for (j = 0; j < Order(local_B); j++) printf("%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, 
&status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage printf("\n"); } } fflush(stdout); } else { MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_B */ /*********************************************************/ /* Recive and Print Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Print_local_matrices_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_C); i++) { for (j = 0; j < Order(local_C); j++) printf("%20.15E ", Entry(local_C,i,j)); printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,i,j)); printf("\n"); } } fflush(stdout); } else { MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_C */ /*********************************************************/ /* Recive and Write Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void 
Write_local_matrices_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_A.dat","w+"); printf("%s\n", title); fprintf(fp,"Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_A); i++) { for (j = 0; j < Order(local_A); j++) fprintf(fp,"%20.15E ", Entry(local_A,i,j)); fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,i,j)); fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_A */ /*********************************************************/ /* Recive and Write Local Matrix for local matrix B's transpose: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Write_local_matrices_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_B.dat","w+"); printf("%s\n", title); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_B); i++) { for (j = 0; j < Order(local_B); j++) fprintf(fp, "%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage fprintf(fp, "\n"); } for (source = 1; 
source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_B */ /*********************************************************/ /* Recive and Write Local Matrix C: * Process 0 print local matrix local_C * Other Processess send local matrix local_C to process 0 * And process 0 receive local matrix local_C from other processess */ void Write_local_matrices_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_C.dat","w+"); printf("%s\n", title); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_C); i++) { for (j = 0; j < Order(local_C); j++) fprintf(fp, "%20.15E ", Entry(local_C,i,j)); fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,i,j)); fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_C */
CriticalEndLink.c
/* OpenMP conformance test: two back-to-back `critical` constructs in
 * the same function.  The bodies are deliberately trivial — an
 * expression statement and a local declaration that shadows the
 * file-scope x — the point is only that both regions compile and
 * link against the runtime's critical-section entry/exit calls. */
int x;

int main ()
{
#pragma omp critical
    {
        11;        /* intentional no-op expression statement */
    }
#pragma omp critical
    {
        int x;     /* intentionally shadows the global x */
    }
}
pooling_hcl_arm.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include <assert.h>
#include <arm_neon.h>

#include "pooling_param.h"

/* Specialized-kernel selectors for the common pooling shapes. */
#define POOL_GENERIC 0
#define POOL_K2S2 1
#define POOL_K3S2 2
#define POOL_K3S1 3

/* Scalar max of two floats (tail/edge elements that NEON lanes miss). */
static inline float arm64_max(float a, float b)
{
    if (a > b)
        return a;
    else
        return b;
}

/* Scalar min of two floats. */
static inline float arm64_min(float a, float b)
{
    if (a > b)
        return b;
    else
        return a;
}

/* Common signature for all pooling kernels: input/output tensors,
 * channel count, input and output spatial dims, kernel size, stride,
 * and the four padding values (h0/w0 leading, h1/w1 trailing). */
typedef void (*pooling_kernel_t)(const void* input, void* output, int inc, int inh, int inw, int outh, int outw,
                                 int, int, int, int, int, int, int pad_h1, int pad_w1, int);

/* 2x2 stride-2 average pooling, NCHW float32.
 * When pad_w1/pad_h1 are set, the last output column/row is computed
 * separately from a partial (2x1, 1x2 or 1x1) window.
 * The main loop does 4 output pixels per iteration with NEON:
 * pairwise-add across the 2x2 window, then scale by 1/4. */
static void avg_2x2s2(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h,
                      int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;

    /* Trailing padding: handle the last column/row outside the main loops. */
    if (pad_w1 > 0)
    {
        outw--;
    }
    if (pad_h1 > 0)
    {
        outh--;
    }

    int block_w = outw >> 2;            /* vectorized groups of 4 outputs */
    int remain_w = inw - outw * 2;      /* input columns left after the row */

    for (int c = 0; c < inc; c++)
    {
        const float* line0 = input + c * in_hw;   /* top row of the window */
        const float* line1 = line0 + inw;         /* bottom row */
        float* out_ptr = output + c * out_hw;
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < block_w; j++)
            {
                /* 8 input columns -> 4 outputs */
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p10 = vld1q_f32(line1);
                float32x4_t sum0 = vaddq_f32(p00, p10);
                float32x4_t p01 = vld1q_f32(line0 + 4);
                float32x4_t p11 = vld1q_f32(line1 + 4);
                float32x4_t sum1 = vaddq_f32(p01, p11);
#ifdef __aarch64__
                sum0 = vpaddq_f32(sum0, sum1);    /* pairwise add across lanes */
#else
                float32x2_t sum0_1 = vpadd_f32(vget_low_f32(sum0), vget_high_f32(sum0));
                float32x2_t sum0_2 = vpadd_f32(vget_low_f32(sum1), vget_high_f32(sum1));
                sum0 = vcombine_f32(sum0_1, sum0_2);
#endif
                sum0 = vmulq_n_f32(sum0, 0.25f);  /* mean of the 2x2 window */
                vst1q_f32(out_ptr, sum0);

                line0 += 8;
                line1 += 8;
                out_ptr += 4;
            }
            /* Scalar tail of the row. */
            for (int j = block_w * 4; j < outw; j++)
            {
                float32x2_t p1 = vld1_f32(line0);
                float32x2_t p2 = vld1_f32(line1);
                float32x2_t sum = vadd_f32(p1, p2);

                *out_ptr = (sum[0] + sum[1]) * 0.25f;
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            /* Trailing-width pad: 1x2 partial window, mean over 2 values. */
            if (pad_w1)
            {
                *out_ptr = (line0[0] + line1[0]) * 0.5f;
                out_ptr++;
            }

            line0 += remain_w + inw;   /* skip to the next window row pair */
            line1 += remain_w + inw;
        }
        /* Trailing-height pad: only line0 exists, mean over 2 values. */
        if (pad_h1)
        {
            for (int j = 0; j < block_w; j++)
            {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p01 = vld1q_f32(line0 + 4);
#ifdef __aarch64__
                p00 = vpaddq_f32(p00, p01);
#else
                float32x2_t sum0_1 = vpadd_f32(vget_low_f32(p00), vget_high_f32(p00));
                float32x2_t sum0_2 = vpadd_f32(vget_low_f32(p01), vget_high_f32(p01));
                p00 = vcombine_f32(sum0_1, sum0_2);
#endif
                p00 = vmulq_n_f32(p00, 0.5f);
                vst1q_f32(out_ptr, p00);
                line0 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4; j < outw; j++)
            {
                float32x2_t p1 = vld1_f32(line0);
                *out_ptr = (p1[0] + p1[1]) * 0.5f;
                out_ptr++;
                line0 += 2;
            }
            /* Bottom-right corner: single-element window. */
            if (pad_w1)
            {
                *out_ptr = line0[0];
                out_ptr++;
            }
        }
    }
}

/* 2x2 stride-2 max pooling, NCHW float32; same structure as avg_2x2s2
 * but with pairwise max instead of add+scale. */
static void max_2x2s2(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h,
                      int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;

    if (pad_w1 > 0)
    {
        outw--;
    }
    if (pad_h1 > 0)
    {
        outh--;
    }

    int block_w = outw >> 2;
    int remain_w = inw - outw * 2;

    for (int c = 0; c < inc; c++)
    {
        const float* line0 = input + c * in_hw;
        const float* line1 = line0 + inw;
        float* out_ptr = output + c * out_hw;
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < block_w; j++)
            {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p10 = vld1q_f32(line1);
                float32x4_t p01 = vld1q_f32(line0 + 4);
                float32x4_t p11 = vld1q_f32(line1 + 4);
#ifdef __aarch64__
                float32x4_t max0 = vmaxq_f32(p00, p10);
                float32x4_t max1 = vmaxq_f32(p01, p11);
                /* pairwaise max */
                float32x4_t _max = vpmaxq_f32(max0, max1);
#else
                float32x2_t max0_1 = vpmax_f32(vget_low_f32(p00), vget_low_f32(p10));
                float32x2_t max0_2 = vpmax_f32(vget_high_f32(p00), vget_high_f32(p10));
                max0_1 = vpmax_f32(max0_1, max0_2);
                float32x2_t max1_1 = vpmax_f32(vget_low_f32(p01), vget_low_f32(p11));
                float32x2_t max1_2 = vpmax_f32(vget_high_f32(p01), vget_high_f32(p11));
                max1_1 = vpmax_f32(max1_1, max1_2);
                float32x4_t _max = vcombine_f32(max0_1, max1_1);
#endif
                vst1q_f32(out_ptr, _max);
                line0 += 8;
                line1 += 8;
                out_ptr += 4;
            }
            /* Scalar tail of the row. */
            for (int j = block_w * 4; j < outw; j++)
            {
                float32x2_t p1 = vld1_f32(line0);
                float32x2_t p2 = vld1_f32(line1);
                float32x2_t _max = vmax_f32(p1, p2);
                *out_ptr = arm64_max(_max[0], _max[1]);
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            /* Trailing-width pad: 1x2 partial window. */
            if (pad_w1 > 0)
            {
                *out_ptr = arm64_max(line0[0], line1[0]);
                out_ptr++;
            }

            line0 += remain_w + inw;
            line1 += remain_w + inw;
        }
        /* Trailing-height pad: only the top row of the window exists. */
        if (pad_h1 > 0)
        {
            for (int j = 0; j < block_w; j++)
            {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p01 = vld1q_f32(line0 + 4);
#ifdef __aarch64__
                p00 = vpmaxq_f32(p00, p01);
#else
                float32x2_t max0_1 = vpmax_f32(vget_low_f32(p00), vget_high_f32(p00));
                float32x2_t max0_2 = vpmax_f32(vget_low_f32(p01), vget_high_f32(p01));
                p00 = vcombine_f32(max0_1, max0_2);
#endif
                vst1q_f32(out_ptr, p00);
                line0 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4; j < outw; j++)
            {
                float32x2_t p1 = vld1_f32(line0);
                *out_ptr = arm64_max(p1[0], p1[1]);
                out_ptr++;
                line0 += 2;
            }
            /* Bottom-right corner: single-element window. */
            if (pad_w1 > 0)
            {
                *out_ptr = line0[0];
                out_ptr++;
            }
        }
    }
}

/* 3x3 stride-2 average pooling (continues on the following lines). */
static void avg_3x3s2(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0,
int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;
    /* border outputs from partial windows are produced separately below */
    if (pad_w1 > 0) { outw--; }
    if (pad_h1 > 0) { outh--; }
    int block_w = outw >> 2;       /* output quads per row */
    int remain_w = inw - outw * 2; /* input columns not consumed by the main row loop */
    for (int c = 0; c < inc; c++) {
        const float* line0 = input + c * in_hw;
        const float* line1 = line0 + inw;
        const float* line2 = line1 + inw;
        float* out_ptr = output + c * out_hw;
        for (int i = 0; i < outh; i++) {
            /* vld2q de-interleaves: val[0] = even columns, val[1] = odd columns */
            float32x4x2_t p00 = vld2q_f32(line0);
            float32x4x2_t p10 = vld2q_f32(line1);
            float32x4x2_t p20 = vld2q_f32(line2);
            for (int j = 0; j < block_w; j++) {
                /* per row: window sum = even + odd + (even shifted left by one),
                 * giving the 3 horizontal taps of four stride-2 windows at once */
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                sum0 = vaddq_f32(sum0, p01);
                float32x4x2_t p10_new = vld2q_f32(line1 + 8);
                float32x4_t sum1 = vaddq_f32(p10.val[0], p10.val[1]);
                float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
                sum1 = vaddq_f32(sum1, p11);
                float32x4x2_t p20_new = vld2q_f32(line2 + 8);
                float32x4_t sum2 = vaddq_f32(p20.val[0], p20.val[1]);
                float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1);
                sum2 = vaddq_f32(sum2, p21);
                sum0 = vaddq_f32(vaddq_f32(sum0, sum1), sum2);
                sum0 = vmulq_n_f32(sum0, 0.11111111f); /* 1/9: full 3x3 window */
                vst1q_f32(out_ptr, sum0);
                p00 = p00_new;
                p10 = p10_new;
                p20 = p20_new;
                line0 += 8;
                line1 += 8;
                line2 += 8;
                out_ptr += 4;
            }
            /* scalar leftovers of this output row */
            for (int j = block_w * 4; j < outw; j++) {
                *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2] + line2[0] + line2[1] +
                            line2[2]) *
                           0.11111111f;
                out_ptr++;
                line0 += 2;
                line1 += 2;
                line2 += 2;
            }
            /* right border: 3x2 partial window -> mean over 6 taps */
            if (pad_w1 == 1) {
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.16666667f;
                out_ptr++;
            }
            line0 += remain_w + inw;
            line1 += remain_w + inw;
            line2 += remain_w + inw;
        }
        /* bottom border: two valid input rows remain */
        if (pad_h1 == 1) {
            float32x4x2_t p00 = vld2q_f32(line0);
            float32x4x2_t p10 = vld2q_f32(line1);
            for (int j = 0; j < block_w; j++) {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                sum0 = vaddq_f32(sum0, p01);
                float32x4x2_t p10_new = vld2q_f32(line1 + 8);
                float32x4_t sum1 = vaddq_f32(p10.val[0], p10.val[1]);
                float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
                sum1 = vaddq_f32(sum1, p11);
                sum0 = vaddq_f32(sum0, sum1);
                sum0 = vmulq_n_f32(sum0, 0.16666667f); /* 1/6: 2x3 partial window */
                vst1q_f32(out_ptr, sum0);
                p00 = p00_new;
                p10 = p10_new;
                line0 += 8;
                line1 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4; j < outw; j++) {
                *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2]) * 0.16666667f;
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            /* bottom-right corner: 2x2 or 2x1 partial window */
            if (pad_w1 == 1) {
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.25f;
                out_ptr++;
            }
            else if (pad_w1 == 2) {
                *out_ptr = (line0[0] + line1[0]) * 0.5f;
                out_ptr++;
            }
        }
        /* bottom border: only one valid input row remains */
        else if (pad_h1 == 2) {
            float32x4x2_t p00 = vld2q_f32(line0);
            for (int j = 0; j < block_w; j++) {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                sum0 = vaddq_f32(sum0, p01);
                sum0 = vmulq_n_f32(sum0, 0.3333333f); /* 1/3: 1x3 partial window */
                vst1q_f32(out_ptr, sum0);
                p00 = p00_new;
                line0 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4; j < outw; j++) {
                *out_ptr = (line0[0] + line0[1] + line0[2]) * 0.3333333f;
                out_ptr++;
                line0 += 2;
            }
            if (pad_w1 == 1) {
                *out_ptr = (line0[0] + line0[1]) * 0.5f;
                out_ptr++;
            }
            else if (pad_w1 == 2) {
                *out_ptr = line0[0];
                out_ptr++;
            }
        }
    }
}

/* 3x3, stride-2 max pooling without top/left padding.
 * NOTE(review): unlike avg_3x3s2, only pad_w1/pad_h1 == 1 borders are produced
 * here (there is no "== 2" branch) — confirm callers never request 2. */
static void max_3x3s2(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h,
                      int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;
    if (pad_w1 > 0) { outw--; }
    if (pad_h1 > 0) { outh--; }
    int block_w = outw >> 2;
    int remain_w = inw - outw * 2;
    for (int c = 0; c < inc; c++) {
        const float* line0 = input + c * in_hw;
        const float* line1 = line0 + inw;
        const float* line2 = line1 + inw;
        float* out_ptr = output + c * out_hw;
        for (int
i = 0; i < outh; i++) /* (continues the row loop begun on the previous source line) */
        {
            float32x4x2_t p00 = vld2q_f32(line0);
            float32x4x2_t p10 = vld2q_f32(line1);
            float32x4x2_t p20 = vld2q_f32(line2);
            for (int j = 0; j < block_w; j++) {
                /* p00 = [1,2,3,4,5,6,7,8]  p00.val[0]=[1,3,5,7]  max0 = [2,4,6,8]
                 * p00_new = [9,10,11,12,13,14,15,16]
                 * p01 = [3,5,7,9]  max0 = max(max0,p01) = [3,5,7,9]
                 * i.e. each lane ends up as the max of a 3-wide, stride-2 window. */
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t max0 = vmaxq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                max0 = vmaxq_f32(max0, p01);
                float32x4x2_t p10_new = vld2q_f32(line1 + 8);
                float32x4_t max1 = vmaxq_f32(p10.val[0], p10.val[1]);
                float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
                max1 = vmaxq_f32(max1, p11);
                float32x4x2_t p20_new = vld2q_f32(line2 + 8);
                float32x4_t max2 = vmaxq_f32(p20.val[0], p20.val[1]);
                float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1);
                max2 = vmaxq_f32(max2, p21);
                max0 = vmaxq_f32(vmaxq_f32(max0, max1), max2);
                vst1q_f32(out_ptr, max0);
                p00 = p00_new;
                p10 = p10_new;
                p20 = p20_new;
                line0 += 8;
                line1 += 8;
                line2 += 8;
                out_ptr += 4;
            }
            /* scalar leftovers of this output row */
            for (int j = block_w * 4; j < outw; j++) {
                float max0 = arm64_max(arm64_max(line0[0], line0[1]), line0[2]);
                float max1 = arm64_max(arm64_max(line1[0], line1[1]), line1[2]);
                float max2 = arm64_max(arm64_max(line2[0], line2[1]), line2[2]);
                *out_ptr = arm64_max(arm64_max(max0, max1), max2);
                out_ptr++;
                line0 += 2;
                line1 += 2;
                line2 += 2;
            }
            /* right border: 3x2 partial window */
            if (pad_w1 == 1) {
                float max0 = arm64_max(arm64_max(line0[0], line0[1]), arm64_max(line1[0], line1[1]));
                *out_ptr = arm64_max(arm64_max(line2[0], line2[1]), max0);
                out_ptr++;
            }
            line0 += remain_w + inw;
            line1 += remain_w + inw;
            line2 += remain_w + inw;
        }
        /* bottom border row: two valid input rows remain */
        if (pad_h1 == 1) {
            float32x4x2_t p00 = vld2q_f32(line0);
            float32x4x2_t p10 = vld2q_f32(line1);
            for (int j = 0; j < block_w; j++) {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t max0 = vmaxq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                max0 = vmaxq_f32(max0, p01);
                float32x4x2_t p10_new = vld2q_f32(line1 + 8);
                float32x4_t max1 = vmaxq_f32(p10.val[0], p10.val[1]);
                float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
                max1 = vmaxq_f32(max1, p11);
                vst1q_f32(out_ptr, vmaxq_f32(max0, max1));
                p00 = p00_new;
                p10 = p10_new;
                line0 += 8;
                line1 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4; j < outw; j++) {
                float max0 = arm64_max(arm64_max(line0[0], line0[1]), line0[2]);
                float max1 = arm64_max(arm64_max(line1[0], line1[1]), line1[2]);
                *out_ptr = arm64_max(max0, max1);
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            /* bottom-right corner: 2x2 partial window */
            if (pad_w1 == 1) {
                *out_ptr = arm64_max(arm64_max(line0[0], line0[1]), arm64_max(line1[0], line1[1]));
                out_ptr++;
            }
        }
    }
}

/* 2x2, stride-2 average pooling with pad = 1 on all sides.
 * The first output row/column comes from a partial window (the padded zeros are
 * never read): is_caffe == 0 divides by the number of valid taps, is_caffe == 1
 * divides by the full kernel area (0.25), matching Caffe's flavor. */
static void avg_2x2s2_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h,
                         int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;
    /* with pad=1 an even extent means the last output also uses a partial window */
    if (inw % 2 == 0)
        outw--;
    if (inh % 2 == 0)
        outh--;
    int block_w = (outw - 1) >> 2;       /* interior output quads per row (first column is special) */
    int remain_w = inw - outw * 2 + 1;   /* columns left after an interior row */
    for (int c = 0; c < inc; c++) {
        const float* line00 = input + c * in_hw;
        float* out_ptr = output + c * out_hw;
        // h begin
        /* top-left corner: single valid tap */
        if (is_caffe == 0)
            *out_ptr = line00[0];
        else
            *out_ptr = line00[0] * 0.25f;
        out_ptr++;
        line00++;
        /* rest of top row: 1x2 partial windows */
        for (int j = 0; j < block_w; j++) {
            float32x4_t p00 = vld1q_f32(line00);
            float32x4_t p01 = vld1q_f32(line00 + 4);
#ifdef __aarch64__
            float32x4_t sum0 = vpaddq_f32(p00, p01);
#else
            float32x2_t sum0_1 = vpadd_f32(vget_low_f32(p00), vget_high_f32(p00));
            float32x2_t sum0_2 = vpadd_f32(vget_low_f32(p01), vget_high_f32(p01));
            float32x4_t sum0 = vcombine_f32(sum0_1, sum0_2);
#endif
            if (is_caffe == 0)
                sum0 = vmulq_n_f32(sum0, 0.5f);
            else
                sum0 = vmulq_n_f32(sum0, 0.25f);
            vst1q_f32(out_ptr, sum0);
            line00 += 8;
            out_ptr += 4;
        }
        for (int j = block_w * 4 + 1; j < outw; j++) {
            if (is_caffe == 0)
                *out_ptr = (line00[0] + line00[1]) * 0.5f;
            else
                *out_ptr = (line00[0] + line00[1]) * 0.25f;
            out_ptr++;
            line00 += 2;
        }
        /* top-right corner when inw is even: single valid tap */
        if (inw % 2 == 0) {
            if (is_caffe == 0)
                *out_ptr =
line00[0]; /* (continues the top-right-corner store begun on the previous source line) */
            else
                *out_ptr = line00[0] * 0.25f;
            out_ptr++;
        }
        line00 += remain_w;
        // h center
        const float* line0 = line00;
        const float* line1 = line0 + inw;
        for (int i = 1; i < outh; i++) {
            // w begin
            /* left edge: 2x1 partial window */
            if (is_caffe == 0)
                *out_ptr = (line0[0] + line1[0]) * 0.5f;
            else
                *out_ptr = (line0[0] + line1[0]) * 0.25f;
            out_ptr++;
            line0++;
            line1++;
            // w center
            for (int j = 0; j < block_w; j++) {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p10 = vld1q_f32(line1);
                float32x4_t sum0 = vaddq_f32(p00, p10);
                float32x4_t p01 = vld1q_f32(line0 + 4);
                float32x4_t p11 = vld1q_f32(line1 + 4);
                float32x4_t sum1 = vaddq_f32(p01, p11);
#ifdef __aarch64__
                float32x4_t _sum = vpaddq_f32(sum0, sum1);
#else
                float32x2_t sum0_1 = vpadd_f32(vget_low_f32(sum0), vget_high_f32(sum0));
                float32x2_t sum0_2 = vpadd_f32(vget_low_f32(sum1), vget_high_f32(sum1));
                float32x4_t _sum = vcombine_f32(sum0_1, sum0_2);
#endif
                _sum = vmulq_n_f32(_sum, 0.25f); /* full 2x2 window */
                vst1q_f32(out_ptr, _sum);
                out_ptr += 4;
                line0 += 8;
                line1 += 8;
            }
            for (int j = block_w * 4 + 1; j < outw; j++) {
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.25f;
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            // w end
            /* right edge when inw is even: 2x1 partial window */
            if (inw % 2 == 0) {
                if (is_caffe == 0)
                    *out_ptr = (line0[0] + line1[0]) * 0.5f;
                else
                    *out_ptr = (line0[0] + line1[0]) * 0.25f;
                out_ptr++;
            }
            line0 += remain_w + inw;
            line1 += remain_w + inw;
        }
        // h end
        /* bottom row when inh is even: only one valid input row */
        if (inh % 2 == 0) {
            if (is_caffe == 0)
                *out_ptr = line0[0];
            else
                *out_ptr = line0[0] * 0.25f;
            out_ptr++;
            line0++;
            for (int j = 0; j < block_w; j++) {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p01 = vld1q_f32(line0 + 4);
#ifdef __aarch64__
                float32x4_t _sum = vpaddq_f32(p00, p01);
#else
                float32x2_t sum0_1 = vpadd_f32(vget_low_f32(p00), vget_high_f32(p00));
                float32x2_t sum0_2 = vpadd_f32(vget_low_f32(p01), vget_high_f32(p01));
                float32x4_t _sum = vcombine_f32(sum0_1, sum0_2);
#endif
                if (is_caffe == 0)
                    _sum = vmulq_n_f32(_sum, 0.5f);
                else
                    _sum = vmulq_n_f32(_sum, 0.25f);
                vst1q_f32(out_ptr, _sum);
                out_ptr += 4;
                line0 += 8;
            }
            for (int j = block_w * 4 + 1; j < outw; j++) {
                if (is_caffe == 0)
                    *out_ptr = (line0[0] + line0[1]) * 0.5f;
                else
                    *out_ptr = (line0[0] + line0[1]) * 0.25f;
                out_ptr++;
                line0 += 2;
            }
            /* bottom-right corner: single valid tap */
            if (inw % 2 == 0) {
                if (is_caffe == 0)
                    *out_ptr = line0[0];
                else
                    *out_ptr = line0[0] * 0.25f;
            }
        }
    }
}

/* 2x2, stride-2 max pooling with pad = 1 on all sides: border outputs take the
 * max over the valid taps only (padded zeros are never read, so no is_caffe
 * distinction is needed for max). */
static void max_2x2s2_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h,
                         int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;
    if (inw % 2 == 0)
        outw--;
    if (inh % 2 == 0)
        outh--;
    int block_w = (outw - 1) >> 2;
    int remain_w = inw - outw * 2 + 1;
    for (int c = 0; c < inc; c++) {
        const float* line00 = input + c * in_hw;
        float* out_ptr = output + c * out_hw;
        // h begin
        *out_ptr = line00[0]; /* top-left corner: single valid tap */
        out_ptr++;
        line00++;
        for (int j = 0; j < block_w; j++) {
            float32x4_t p00 = vld1q_f32(line00);
            float32x4_t p01 = vld1q_f32(line00 + 4);
#ifdef __aarch64__
            float32x4_t _max = vpmaxq_f32(p00, p01);
#else
            float32x2_t max0_1 = vpmax_f32(vget_low_f32(p00), vget_high_f32(p00));
            float32x2_t max0_2 = vpmax_f32(vget_low_f32(p01), vget_high_f32(p01));
            float32x4_t _max = vcombine_f32(max0_1, max0_2);
#endif
            vst1q_f32(out_ptr, _max);
            out_ptr += 4;
            line00 += 8;
        }
        for (int j = block_w * 4 + 1; j < outw; j++) {
            *out_ptr = arm64_max(line00[0], line00[1]);
            out_ptr++;
            line00 += 2;
        }
        if (inw % 2 == 0) {
            *out_ptr = line00[0];
            out_ptr++;
        }
        line00 += remain_w;
        // h center
        const float* line0 = line00;
        const float* line1 = line0 + inw;
        for (int i = 1; i < outh; i++) {
            // w begin
            *out_ptr = arm64_max(line0[0], line1[0]);
            out_ptr++;
            line0++;
            line1++;
            // w center
            for (int j = 0; j < block_w; j++) {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p10 = vld1q_f32(line1);
                float32x4_t p01 = vld1q_f32(line0 + 4);
                float32x4_t p11 = vld1q_f32(line1 + 4);
#ifdef __aarch64__
                float32x4_t max0 = vmaxq_f32(p00, p10);
                float32x4_t max1 = vmaxq_f32(p01, p11);
                float32x4_t _max = vpmaxq_f32(max0, max1);
#else
                float32x2_t max0_1 =
vpmax_f32(vget_low_f32(p00), vget_low_f32(p10)); float32x2_t max0_2 = vpmax_f32(vget_high_f32(p00), vget_high_f32(p10)); max0_1 = vpmax_f32(max0_1, max0_2); float32x2_t max1_1 = vpmax_f32(vget_low_f32(p01), vget_low_f32(p11)); float32x2_t max1_2 = vpmax_f32(vget_high_f32(p01), vget_high_f32(p11)); max1_1 = vpmax_f32(max1_1, max1_2); float32x4_t _max = vcombine_f32(max0_1, max1_1); #endif vst1q_f32(out_ptr, _max); out_ptr += 4; line0 += 8; line1 += 8; } for (int j = block_w * 4 + 1; j < outw; j++) { float32x2_t p1 = vld1_f32(line0); float32x2_t p2 = vld1_f32(line1); float32x2_t _max = vmax_f32(p1, p2); *out_ptr = arm64_max(_max[0], _max[1]); out_ptr++; line0 += 2; line1 += 2; } // w end if (inw % 2 == 0) { *out_ptr = arm64_max(line0[0], line1[0]); out_ptr++; } line0 += remain_w + inw; line1 += remain_w + inw; } // h end if (inh % 2 == 0) { *out_ptr = line0[0]; out_ptr++; line0++; for (int j = 0; j < block_w; j++) { float32x4_t p00 = vld1q_f32(line0); float32x4_t p01 = vld1q_f32(line0 + 4); #ifdef __aarch64__ float32x4_t _max = vpmaxq_f32(p00, p01); #else float32x2_t max0_1 = vpmax_f32(vget_low_f32(p00), vget_high_f32(p00)); float32x2_t max0_2 = vpmax_f32(vget_low_f32(p01), vget_high_f32(p01)); float32x4_t _max = vcombine_f32(max0_1, max0_2); #endif vst1q_f32(out_ptr, _max); out_ptr += 4; line0 += 8; } for (int j = block_w * 4 + 1; j < outw; j++) { *out_ptr = arm64_max(line0[0], line0[1]); out_ptr++; line0 += 2; } if (inw % 2 == 0) { *out_ptr = line0[0]; } } } } static void max_3x3s2_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe) { // fprintf(stderr, "max_3x3s2_p1\n"); int in_hw = inw * inh; int out_hw = outh * outw; if (is_caffe == 1 || inw % 2 == 1) outw--; if (is_caffe == 1 || inh % 2 == 1) outh--; int block_w = (outw - 1) >> 2; int remain_w = inw - outw * 2 + 1; for (int c = 0; c < inc; c++) { const float* line1 = input + c * 
in_hw; /* (continues the pointer setup begun on the previous source line) */
        const float* line2 = line1 + inw;
        float* out_ptr = output + c * out_hw;
        // h begin ---------------------------------------
        /* top-left corner: 2x2 valid taps */
        *out_ptr = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line2[0], line2[1]));
        out_ptr++;
        line1 += 1;
        line2 += 1;
        /* rest of top row: 2x3 partial windows over the first two input rows */
        float32x4x2_t p10 = vld2q_f32(line1);
        float32x4x2_t p20 = vld2q_f32(line2);
        for (int j = 0; j < block_w; j++) {
            float32x4x2_t p10_new = vld2q_f32(line1 + 8);
            float32x4_t max1 = vmaxq_f32(p10.val[0], p10.val[1]);
            float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
            max1 = vmaxq_f32(max1, p11);
            float32x4x2_t p20_new = vld2q_f32(line2 + 8);
            float32x4_t max2 = vmaxq_f32(p20.val[0], p20.val[1]);
            float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1);
            max2 = vmaxq_f32(max2, p21);
            max1 = vmaxq_f32(max1, max2);
            vst1q_f32(out_ptr, max1);
            p10 = p10_new;
            p20 = p20_new;
            line1 += 8;
            line2 += 8;
            out_ptr += 4;
        }
        for (int j = block_w * 4 + 1; j < outw; j++) {
            float max1 = arm64_max(arm64_max(line1[0], line1[1]), line1[2]);
            float max2 = arm64_max(arm64_max(line2[0], line2[1]), line2[2]);
            *out_ptr = arm64_max(max1, max2);
            out_ptr++;
            line1 += 2;
            line2 += 2;
        }
        /* top-right corner: 2x2 (odd inw) or 2x1 (even inw, caffe ceil mode) */
        if (inw % 2 == 1) {
            *out_ptr = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line2[0], line2[1]));
            out_ptr++;
        }
        else if (is_caffe == 1 && inw % 2 == 0) {
            *out_ptr = arm64_max(line1[0], line2[0]);
            out_ptr++;
        }
        line1 += remain_w;
        line2 += remain_w;
        // h center ---------------------------------------
        /* shift the row window down: line0 is now the top row of each 3x3 window */
        const float* line0 = line1;
        line1 = line2;
        line2 = line1 + inw;
        for (int i = 1; i < outh; i++) {
            // left
            float max0 = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line2[0], line2[1]));
            *out_ptr = arm64_max(arm64_max(line0[0], line0[1]), max0);
            out_ptr++;
            line0 += 1;
            line1 += 1;
            line2 += 1;
            // mid
            float32x4x2_t p00 = vld2q_f32(line0);
            float32x4x2_t p10 = vld2q_f32(line1);
            float32x4x2_t p20 = vld2q_f32(line2);
            for (int j = 0; j < block_w; j++) {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t max0 = vmaxq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                max0 = vmaxq_f32(max0, p01);
                float32x4x2_t p10_new = vld2q_f32(line1 + 8);
                float32x4_t max1 = vmaxq_f32(p10.val[0], p10.val[1]);
                float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
                max1 = vmaxq_f32(max1, p11);
                float32x4x2_t p20_new = vld2q_f32(line2 + 8);
                float32x4_t max2 = vmaxq_f32(p20.val[0], p20.val[1]);
                float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1);
                max2 = vmaxq_f32(max2, p21);
                max0 = vmaxq_f32(vmaxq_f32(max0, max1), max2);
                vst1q_f32(out_ptr, max0);
                p00 = p00_new;
                p10 = p10_new;
                p20 = p20_new;
                line0 += 8;
                line1 += 8;
                line2 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4 + 1; j < outw; j++) {
                float max0 = arm64_max(arm64_max(line0[0], line0[1]), line0[2]);
                float max1 = arm64_max(arm64_max(line1[0], line1[1]), line1[2]);
                float max2 = arm64_max(arm64_max(line2[0], line2[1]), line2[2]);
                *out_ptr = arm64_max(arm64_max(max0, max1), max2);
                out_ptr++;
                line0 += 2;
                line1 += 2;
                line2 += 2;
            }
            /* right edge: 3x2 (odd inw) or 3x1 (even inw, caffe) partial window */
            if (inw % 2 == 1) {
                max0 = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line2[0], line2[1]));
                *out_ptr = arm64_max(arm64_max(line0[0], line0[1]), max0);
                out_ptr++;
            }
            else if (inw % 2 == 0 && is_caffe == 1) {
                *out_ptr = arm64_max(arm64_max(line0[0], line1[0]), line2[0]);
                out_ptr++;
            }
            line0 += inw + remain_w;
            line1 += inw + remain_w;
            line2 += inw + remain_w;
        }
        // h end ------------------------------------------
        /* bottom row: two valid rows (odd inh) ... */
        if (inh % 2 == 1) {
            *out_ptr = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line0[0], line0[1]));
            out_ptr++;
            line0 += 1;
            line1 += 1;
            float32x4x2_t p00 = vld2q_f32(line0);
            float32x4x2_t p10 = vld2q_f32(line1);
            for (int j = 0; j < block_w; j++) {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t max0 = vmaxq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                max0 = vmaxq_f32(max0, p01);
                float32x4x2_t p10_new = vld2q_f32(line1 + 8);
                float32x4_t max1 = vmaxq_f32(p10.val[0], p10.val[1]);
                float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
                max1 = vmaxq_f32(max1, p11);
                max0 = vmaxq_f32(max0, max1);
                vst1q_f32(out_ptr, max0);
                p00 = p00_new;
                p10 = p10_new;
                line0 += 8;
                line1 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4 + 1; j < outw; j++) {
                float max0 = arm64_max(arm64_max(line0[0], line0[1]), line0[2]);
                float max1 = arm64_max(arm64_max(line1[0], line1[1]), line1[2]);
                *out_ptr = arm64_max(max0, max1);
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            if (inw % 2 == 1) {
                *out_ptr = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line0[0], line0[1]));
                out_ptr++;
            }
            else if (inw % 2 == 0 && is_caffe == 1) {
                *out_ptr = arm64_max(line0[0], line1[0]);
                out_ptr++;
            }
        }
        /* ... or a single valid row (even inh, caffe ceil mode) */
        else if (inh % 2 == 0 && is_caffe == 1) {
            *out_ptr = arm64_max(line0[0], line0[1]);
            out_ptr++;
            line0 += 1;
            float32x4x2_t p00 = vld2q_f32(line0);
            for (int j = 0; j < block_w; j++) {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t max0 = vmaxq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                max0 = vmaxq_f32(max0, p01);
                vst1q_f32(out_ptr, max0);
                p00 = p00_new;
                line0 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4 + 1; j < outw; j++) {
                *out_ptr = arm64_max(arm64_max(line0[0], line0[1]), line0[2]);
                out_ptr++;
                line0 += 2;
            }
            if (inw % 2 == 1) {
                *out_ptr = arm64_max(line0[0], line0[1]);
                out_ptr++;
            }
            else if (inw % 2 == 0) {
                *out_ptr = line0[0];
            }
        }
    }
}

/* 3x3, stride-2 average pooling with pad = 1 on all sides.  Border windows are
 * partial; is_caffe == 0 divides by the count of valid taps, is_caffe == 1
 * divides by the full kernel area (1/9) as Caffe does. */
static void avg_3x3s2_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h,
                         int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;
    if (is_caffe == 1 || inw % 2 == 1)
        outw--;
    if (is_caffe == 1 || inh % 2 == 1)
        outh--;
    int block_w = (outw - 1) >> 2;
    int remain_w = inw - outw * 2 + 1;
    for (int c = 0; c < inc; c++) {
        const float* line1 = input + c * in_hw;
        const float* line2 = line1 + inw;
        float* out_ptr = output + c * out_hw;
        // h begin ---------------------------------------
        /* top-left corner: 2x2 valid taps */
        if (is_caffe == 0)
            *out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) *
0.25f; /* (continues the corner store begun on the previous source line) */
        else
            *out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f;
        out_ptr++;
        line1 += 1;
        line2 += 1;
        /* rest of top row: 2x3 partial windows over the first two input rows */
        float32x4x2_t p10 = vld2q_f32(line1);
        float32x4x2_t p20 = vld2q_f32(line2);
        for (int j = 0; j < block_w; j++) {
            float32x4x2_t p10_new = vld2q_f32(line1 + 8);
            float32x4_t sum1 = vaddq_f32(p10.val[0], p10.val[1]);
            float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
            sum1 = vaddq_f32(sum1, p11);
            float32x4x2_t p20_new = vld2q_f32(line2 + 8);
            float32x4_t sum2 = vaddq_f32(p20.val[0], p20.val[1]);
            float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1);
            sum2 = vaddq_f32(sum2, p21);
            sum1 = vaddq_f32(sum1, sum2);
            if (is_caffe == 0)
                sum1 = vmulq_n_f32(sum1, 0.16666667f); /* 1/6: six valid taps */
            else
                sum1 = vmulq_n_f32(sum1, 0.11111111f); /* 1/9: full kernel area */
            vst1q_f32(out_ptr, sum1);
            p10 = p10_new;
            p20 = p20_new;
            line1 += 8;
            line2 += 8;
            out_ptr += 4;
        }
        for (int j = block_w * 4 + 1; j < outw; j++) {
            if (is_caffe == 0)
                *out_ptr = (line1[0] + line1[1] + line1[2] + line2[0] + line2[1] + line2[2]) * 0.16666667f;
            else
                *out_ptr = (line1[0] + line1[1] + line1[2] + line2[0] + line2[1] + line2[2]) * 0.11111111f;
            out_ptr++;
            line1 += 2;
            line2 += 2;
        }
        /* top-right corner: 2x2 (odd inw) or 2x1 (even inw, caffe) */
        if (inw % 2 == 1) {
            if (is_caffe == 0)
                *out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 0.25f;
            else
                *out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f;
            out_ptr++;
        }
        else if (inw % 2 == 0 && is_caffe == 1) {
            *out_ptr = (line1[0] + line2[0]) * 0.16666667f;
            out_ptr++;
        }
        line1 += remain_w;
        line2 += remain_w;
        // h center ---------------------------------------
        /* shift the row window down: line0 becomes the top row of each window */
        const float* line0 = line1;
        line1 = line2;
        line2 = line1 + inw;
        for (int i = 1; i < outh; i++) {
            // left
            if (is_caffe == 0)
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.16666667f;
            else
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f;
            out_ptr++;
            line0 += 1;
            line1 += 1;
            line2 += 1;
            // mid
            float32x4x2_t p00 = vld2q_f32(line0);
            float32x4x2_t p10 = vld2q_f32(line1);
            float32x4x2_t p20 = vld2q_f32(line2);
            for (int j = 0; j < block_w; j++) {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                sum0 = vaddq_f32(sum0, p01);
                float32x4x2_t p10_new = vld2q_f32(line1 + 8);
                float32x4_t sum1 = vaddq_f32(p10.val[0], p10.val[1]);
                float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
                sum1 = vaddq_f32(sum1, p11);
                float32x4x2_t p20_new = vld2q_f32(line2 + 8);
                float32x4_t sum2 = vaddq_f32(p20.val[0], p20.val[1]);
                float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1);
                sum2 = vaddq_f32(sum2, p21);
                sum0 = vaddq_f32(vaddq_f32(sum0, sum1), sum2);
                sum0 = vmulq_n_f32(sum0, 0.11111111f); /* full 3x3 window */
                vst1q_f32(out_ptr, sum0);
                p00 = p00_new;
                p10 = p10_new;
                p20 = p20_new;
                line0 += 8;
                line1 += 8;
                line2 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4 + 1; j < outw; j++) {
                *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2] + line2[0] + line2[1] +
                            line2[2]) *
                           0.11111111f;
                out_ptr++;
                line0 += 2;
                line1 += 2;
                line2 += 2;
            }
            // end
            /* right edge: 3x2 (odd inw) or 3x1 (even inw, caffe) partial window */
            if (inw % 2 == 1) {
                if (is_caffe == 0)
                    *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.16666667f;
                else
                    *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f;
                out_ptr++;
            }
            else if (inw % 2 == 0 && is_caffe == 1) {
                *out_ptr = (line0[0] + line1[0] + line2[0]) * 0.16666667f;
                out_ptr++;
            }
            line0 += remain_w + inw;
            line1 += remain_w + inw;
            line2 += remain_w + inw;
        }
        // h end-------------------------------
        /* bottom row: two valid rows remain (odd inh) ... */
        if (inh % 2 == 1) {
            if (is_caffe == 0)
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.25f;
            else
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.11111111f;
            out_ptr++;
            line0 += 1;
            line1 += 1;
            float32x4x2_t p00 = vld2q_f32(line0);
            float32x4x2_t p10 = vld2q_f32(line1);
            for (int j = 0; j < block_w; j++) {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                sum0 = vaddq_f32(sum0, p01);
                float32x4x2_t p10_new = vld2q_f32(line1 + 8);
                float32x4_t sum1 = vaddq_f32(p10.val[0], p10.val[1]);
                float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
                sum1 = vaddq_f32(sum1, p11);
                sum0 = vaddq_f32(sum0, sum1);
                if (is_caffe == 0)
                    sum0 = vmulq_n_f32(sum0, 0.16666667f);
                else
                    sum0 = vmulq_n_f32(sum0, 0.11111111f);
                vst1q_f32(out_ptr, sum0);
                p00 = p00_new;
                p10 = p10_new;
                line0 += 8;
                line1 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4 + 1; j < outw; j++) {
                if (is_caffe == 0)
                    *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2]) * 0.16666667f;
                else
                    *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2]) * 0.11111111f;
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            if (inw % 2 == 1) {
                if (is_caffe == 0)
                    *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.25f;
                else
                    *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.11111111f;
                out_ptr++;
            }
            else if (inw % 2 == 0 && is_caffe == 1) {
                *out_ptr = (line0[0] + line1[0]) * 0.16666667f;
                out_ptr++;
            }
        }
        /* NOTE(review): the matching branch in max_3x3s2_p1 tests
         * `inh % 2 == 0 && is_caffe == 1` here; testing `inw` looks like a
         * copy-paste slip — confirm against the reference implementation. */
        else if (inw % 2 == 0 && is_caffe == 1) {
            *out_ptr = (line0[0] + line0[1]) * 0.16666667f;
            out_ptr++;
            line0 += 1;
            float32x4x2_t p00 = vld2q_f32(line0);
            for (int j = 0; j < block_w; j++) {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                sum0 = vaddq_f32(sum0, p01);
                sum0 = vmulq_n_f32(sum0, 0.16666667f);
                vst1q_f32(out_ptr, sum0);
                p00 = p00_new;
                line0 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4 + 1; j < outw; j++) {
                *out_ptr = (line0[0] + line0[1] + line0[2]) * 0.16666667f;
                out_ptr++;
                line0 += 2;
            }
            if (inw % 2 == 1) {
                *out_ptr = (line0[0] + line0[1]) * 0.16666667f;
                out_ptr++;
            }
            else if (inw % 2 == 0) {
                *out_ptr = line0[0] * 0.25f;
                out_ptr++;
            }
        }
    }
}

/* 3x3, stride-1 max pooling with pad = 1: output has the same spatial size as
 * the input (out_ptr advances by c * in_hw).  Scalar implementation. */
static void max_3x3s1_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h,
                         int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int
is_caffe) /* (continues the signature begun on the previous source line) */
{
    // fprintf(stderr, "max_3x3s1_p1\n");
    int in_hw = inw * inh;
    int mid_w = inw - 2; /* interior output columns (all 3 horizontal taps valid) */
    int mid_h = inh - 2; /* interior output rows */
    for (int c = 0; c < inc; c++) {
        const float* line1 = input + c * in_hw;
        const float* line2 = line1 + inw;
        float* out_ptr = output + c * in_hw; /* same-size output plane */
        // h begin left----[line1+=0]-----------------------------------
        /* top-left corner: 2x2 valid taps */
        *out_ptr = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line2[0], line2[1]));
        out_ptr++;
        // h begin center----[line1+=1]----------------------------------
        /* top row interior: 2x3 partial windows */
        for (int j = 0; j < mid_w; j++) {
            float max1 = arm64_max(arm64_max(line1[0], line1[1]), line1[2]);
            float max2 = arm64_max(arm64_max(line2[0], line2[1]), line2[2]);
            *out_ptr = arm64_max(max2, max1);
            out_ptr++;
            line1 += 1;
            line2 += 1;
        }
        // h begin right----[line1+=2]-----------------------------------
        /* top-right corner: 2x2 valid taps */
        *out_ptr = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line2[0], line2[1]));
        out_ptr++;
        line1 += 2;
        line2 += 2;
        // h center ---------------------------------------
        const float* line0 = input + c * in_hw;
        for (int i = 0; i < mid_h; i++) {
            // left
            /* left edge: 3x2 partial window */
            float max0 = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line2[0], line2[1]));
            *out_ptr = arm64_max(arm64_max(line0[0], line0[1]), max0);
            out_ptr++;
            // mid
            for (int j = 0; j < mid_w; j++) {
                float max0 = arm64_max(arm64_max(line0[0], line0[1]), line0[2]);
                float max1 = arm64_max(arm64_max(line1[0], line1[1]), line1[2]);
                float max2 = arm64_max(arm64_max(line2[0], line2[1]), line2[2]);
                *out_ptr = arm64_max(arm64_max(max0, max1), max2);
                out_ptr++;
                line0 += 1;
                line1 += 1;
                line2 += 1;
            }
            /* right edge: 3x2 partial window (reuses outer max0) */
            max0 = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line2[0], line2[1]));
            *out_ptr = arm64_max(arm64_max(line0[0], line0[1]), max0);
            out_ptr++;
            line0 += 2;
            line1 += 2;
            line2 += 2;
        }
        // h end ------------------------------------------
        /* bottom row: 2x2 / 2x3 / 2x2 windows over the last two input rows */
        *out_ptr = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line0[0], line0[1]));
        out_ptr++;
        for (int j = 0; j < mid_w; j++) {
            float max0 = arm64_max(arm64_max(line0[0], line0[1]), line0[2]);
            float max1 = arm64_max(arm64_max(line1[0], line1[1]), line1[2]);
            *out_ptr = arm64_max(max0, max1);
            out_ptr++;
            line0 += 1;
            line1 += 1;
        }
        *out_ptr = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line0[0], line0[1]));
    }
}

/* 3x3, stride-1 average pooling with pad = 1: same-size output.  is_caffe == 0
 * divides border windows by valid-tap count; is_caffe == 1 always divides by 9. */
static void avg_3x3s1_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h,
                         int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe)
{
    // fprintf(stderr, "avg_3x3s1_p1\n");
    int in_hw = inw * inh;
    int mid_w = inw - 2;
    int mid_h = inh - 2;
    for (int c = 0; c < inc; c++) {
        const float* line1 = input + c * in_hw;
        const float* line2 = line1 + inw;
        float* out_ptr = output + c * in_hw;
        // h begin left----[line1+=0]-----------------------------------
        if (is_caffe == 0)
            *out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 0.25f;
        else
            *out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f;
        out_ptr++;
        // h begin center----[line1+=1]----------------------------------
        for (int j = 0; j < mid_w; j++) {
            if (is_caffe == 0)
                *out_ptr = (line1[0] + line1[1] + line1[2] + line2[0] + line2[1] + line2[2]) * 0.16666667f;
            else
                *out_ptr = (line1[0] + line1[1] + line1[2] + line2[0] + line2[1] + line2[2]) * 0.11111111f;
            out_ptr++;
            line1 += 1;
            line2 += 1;
        }
        // h begin right----[line1+=2]-----------------------------------
        if (is_caffe == 0)
            *out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 0.25f;
        else
            *out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f;
        out_ptr++;
        line1 += 2;
        line2 += 2;
        // h center ---------------------------------------
        const float* line0 = input + c * in_hw;
        for (int i = 0; i < mid_h; i++) {
            // left
            if (is_caffe == 0)
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.16666667f;
            else
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f;
            out_ptr++;
            // mid
            for (int j = 0; j < mid_w; j++) {
                *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2] + line2[0] + line2[1] +
line2[2]) * 0.11111111f; out_ptr++; line0 += 1; line1 += 1; line2 += 1; } if (is_caffe == 0) *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.16666667f; else *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f; out_ptr++; line0 += 2; line1 += 2; line2 += 2; } // h end ------------------------------------------ if (is_caffe == 0) *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.25f; else *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.11111111f; out_ptr++; for (int j = 0; j < mid_w; j++) { if (is_caffe == 0) *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2]) * 0.16666667f; else *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2]) * 0.11111111f; out_ptr++; line0 += 1; line1 += 1; } if (is_caffe == 0) *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.25f; else *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.11111111f; } } static void avg_global(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe) { int in_hw = inw * inh; int block = in_hw >> 3; int tail = in_hw & ~7; for (int c = 0; c < inc; c++) { const float* line0 = input + c * in_hw; float* out_ptr = output + c; float sum = 0.f; for (int j = 0; j < block; j++) { float32x4_t p00 = vld1q_f32(line0); float32x4_t p01 = vld1q_f32(line0 + 4); p00 = vaddq_f32(p00, p01); // p00=vpaddq_f32(p00,p00); // sum+=(p00[0]+p00[1]); sum += (p00[0] + p00[1] + p00[2] + p00[3]); line0 += 8; } for (int j = tail; j < in_hw; j++) { sum += line0[0]; line0++; } *out_ptr = sum / in_hw; } } static void max_global(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe) { int in_hw = inw * inh; int block = in_hw >> 3; int tail = in_hw & 
~7; /* (continues the remainder-start computation begun on the previous source line) */
    for (int c = 0; c < inc; c++) {
        const float* line0 = input + c * in_hw;
        float* out_ptr = output + c;
        float32x4_t p00 = vld1q_f32(line0); /* seed with the first 4 elements */
        float32x4_t res = p00;
        for (int j = 0; j < block; j++) {
            float32x4_t p00 = vld1q_f32(line0);
            float32x4_t p01 = vld1q_f32(line0 + 4);
            float32x4_t max0 = vmaxq_f32(p00, p01);
            res = vmaxq_f32(res, max0);
            line0 += 8;
        }
        /* horizontal max of the 4 accumulator lanes */
        float max_ = arm64_max(arm64_max(res[0], res[1]), arm64_max(res[2], res[3]));
        for (int j = tail; j < in_hw; j++) {
            max_ = arm64_max(max_, line0[0]);
            line0++;
        }
        *out_ptr = max_;
    }
}

/* Select the specialized pooling kernel for the given parameters and store it
 * in param->funct.  Returns 0 on success, -1 if no specialized kernel matches
 * (the caller must then fall back to a generic implementation).
 * NOTE(review): the "funct != NULL" success checks assume param->funct was
 * NULL-initialized by the caller — confirm. */
int pooling_kernel_perf_prerun(struct ir_tensor* input, struct ir_tensor* out, struct pool_param* param)
{
    int pool_size = POOL_GENERIC;
    /* global pooling */
    if (param->global) {
        if (param->pool_method == POOL_AVG)
            param->funct = ( pooling_kernel_t )avg_global;
        else if (param->pool_method == POOL_MAX)
            param->funct = ( pooling_kernel_t )max_global;
        assert(param->funct != NULL);
        return 0;
    }
    /* general pooling: classify the kernel/stride combination */
    if (param->stride_h == 2 && param->stride_w == 2) {
        if (param->kernel_h == 2 && param->kernel_w == 2)
            pool_size = POOL_K2S2;
        else if (param->kernel_h == 3 && param->kernel_w == 3)
            pool_size = POOL_K3S2;
    }
    else if (param->stride_h == 1 && param->stride_w == 1) {
        if (param->kernel_h == 3 && param->kernel_w == 3)
            pool_size = POOL_K3S1;
    }
    /* general max pooling, k2s2, k2k2p1, k3s1p1, k3s2, k3s2p1 */
    if (param->pool_method == POOL_MAX) {
        /* specialized kernels require symmetric padding */
        if ((param->pad_h0 == param->pad_w0) && (param->pad_h1 == param->pad_w1)) {
            if (param->pad_h0 == 0) {
                if (pool_size == POOL_K2S2)
                    param->funct = ( pooling_kernel_t )max_2x2s2;
                else if (pool_size == POOL_K3S2)
                    param->funct = ( pooling_kernel_t )max_3x3s2;
            }
            else if (param->pad_h0 == 1) {
                if (pool_size == POOL_K2S2)
                    param->funct = ( pooling_kernel_t )max_2x2s2_p1;
                else if (pool_size == POOL_K3S2)
                    param->funct = ( pooling_kernel_t )max_3x3s2_p1;
                else if (pool_size == POOL_K3S1)
                    param->funct = ( pooling_kernel_t )max_3x3s1_p1;
            }
        }
        if (param->funct != NULL)
            return 0;
        else {
            fprintf(stderr, "perf general max pooling func not be find\n");
            return -1;
        }
    }
    /* general avg pooling, k2s2, k2s2p1, k3s2, k3s2p1 */
    if (param->pool_method == POOL_AVG) {
        if ((param->pad_h0 == param->pad_w0) && (param->pad_h1 == param->pad_w1)) {
            if (param->pad_h0 == 0 && param->pad_h1 == 0) {
                if (pool_size == POOL_K2S2)
                    param->funct = ( pooling_kernel_t )avg_2x2s2;
                else if (pool_size == POOL_K3S2)
                    param->funct = ( pooling_kernel_t )avg_3x3s2;
            }
            else if (param->pad_h0 == 1 && param->pad_h1 == 1) {
                if (pool_size == POOL_K2S2)
                    param->funct = ( pooling_kernel_t )avg_2x2s2_p1;
                else if (pool_size == POOL_K3S2)
                    param->funct = ( pooling_kernel_t )avg_3x3s2_p1;
                else if (pool_size == POOL_K3S1)
                    param->funct = ( pooling_kernel_t )avg_3x3s1_p1;
            }
            /* avg_3x3s2 also handles the trailing (pad_h1 == 1) partial row itself */
            else if (param->pad_h0 == 0 && param->pad_h1 == 1) {
                if (pool_size == POOL_K3S2)
                    param->funct = ( pooling_kernel_t ) avg_3x3s2;
            }
        }
        if (param->funct != NULL)
            return 0;
        else {
            fprintf(stderr, "perf general avg pooling func not be find\n");
            return -1;
        }
    }
    fprintf(stderr, "perf pooling func not be find\n");
    return -1;
}

/* Run the kernel selected by pooling_kernel_perf_prerun, one channel per OpenMP
 * task.  Each call processes a single channel (inc argument == 1).
 * Note: arithmetic on the void* data pointers relies on the GCC extension that
 * treats void* like char* (elem_size scales the byte offset). */
int pooling_kernel_perf_run(struct ir_tensor* input, struct ir_tensor* output, struct pool_param* param,
                            int num_thread)
{
    // fprintf(stderr, "perf pooling_kernel_run\n");
    int is_caffe = param->caffe_flavor;
    pooling_kernel_t kernel = (pooling_kernel_t)(param->funct);
    int batch = input->dims[0]; /* NCHW layout */
    int c = input->dims[1];
    int in_h = input->dims[2];
    int in_w = input->dims[3];
    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int img_size = c * in_h * in_w;
    int feature_size = c * out_h * out_w;
    for (int n = 0; n < batch; n++) {
        void* input_frame = input->data + n * img_size * input->elem_size;
        void* output_frame = output->data + n * feature_size * output->elem_size;
#pragma omp parallel for num_threads(num_thread)
        for (int ch = 0; ch < c; ch++) {
            void* cur_input = input_frame + ch * in_h * in_w * input->elem_size;
            void* cur_output = output_frame + ch * out_h * out_w * output->elem_size;
            kernel(cur_input, cur_output, 1, in_h, in_w, out_h, out_w, param->kernel_h, param->kernel_w,
                   param->stride_h, param->stride_w, param->pad_h0, param->pad_w0, param->pad_h1, param->pad_w1,
                   is_caffe);
        }
    }
    return 0;
}
opencl_iwork_fmt_plug.c
/* JtR format to crack iWork '09, and '13 / '14 files.
 *
 * This software is Copyright (c) 2015, Dhiru Kholia <kholia at kth.se> and
 * Maxime Hulliger <hulliger at kth.se>, and it is hereby released to the
 * general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * This code may be freely used and modified for any purpose.
 *
 * Big thanks to Sean Patrick O'Brien for making this format possible.
 */

#ifdef HAVE_OPENCL

#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_iwork;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_iwork);
#else

#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#include "aes.h"
#include "hmac_sha.h"
#include "arch.h"
#include "formats.h"
#include "common.h"
#include "stdint.h"
#include "iwork_common.h"
#include "options.h"
#include "jumbo.h"
#include "sha2.h"
#include "common-opencl.h"
#include "misc.h"

#define OUTLEN (16)
#include "opencl_pbkdf2_hmac_sha1.h"

#define FORMAT_LABEL            "iwork-opencl"
#define FORMAT_NAME             "Apple iWork '09 / '13 / '14"
#define OCL_ALGORITHM_NAME      "PBKDF2-SHA1 OpenCL"
#define CPU_ALGORITHM_NAME      " AES"
#define ALGORITHM_NAME          OCL_ALGORITHM_NAME CPU_ALGORITHM_NAME
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#define BINARY_SIZE             0
#define BINARY_ALIGN            MEM_ALIGN_WORD
#define PLAINTEXT_LENGTH        64
#define SALT_SIZE               sizeof(*fctx)
#define SALT_ALIGN              MEM_ALIGN_WORD

/* This handles all widths */
#define GETPOS(i, index) (((index) % ocl_v_width) * 4 + ((i) & ~3U) * ocl_v_width + (((i) & 3) ^ 3) + ((index) / ocl_v_width) * 64 * ocl_v_width)

static int *cracked;
static int any_cracked;

static iwork_common_custom_salt *fctx;

static struct fmt_tests iwork_tests[] = {
	{"$iwork$1$2$1$100000$d77ce46a68697e08b76ac91de9117541$e7b72b2848dc27efed883963b00b1ac7$e794144cd2f04bd50e23957b30affb2898554a99a3accb7506c17132654e09c04bbeff45dc4f8a8a1db5fd1592f699eeff2f9a8c31b503e9631a25a344b517f7" ,"12345678"},
	{FORMAT_TAG "1$2$1$100000$c773f06bcd580e4afa35618a7d0bee39$8b241504af92416f226d0eea4bf26443$18358e736a0401061f2dca103fceb29e88606d3ec80d09841360cbb8b9dc1d2908c270d3ff4c05cf7a46591e02ff3c9d75f4582f631721a3257dc087f98f523e", "password"},
	// iWork '09 Keynote file
	{"$iwork$2$1$1$4000$736f6d6553616c74$a9d975f8b3e1bf0c388944b457127df4$09eb5d093584376001d4c94e9d0a41eb8a2993132849c5aed8e56e7bd0e8ed50ba38aced793e3480675990c828c01d25fe245cc6aa603c6cb1a0425988f1d3dc", "openwall"},
	{NULL}
};

/* FIX(review): key_buf_size used to be declared twice in this file; the
 * duplicate tentative definition has been removed. */
static size_t key_buf_size;
static unsigned int *inbuffer;
static pbkdf2_out *output;
static pbkdf2_salt currentsalt;
static cl_mem mem_in, mem_out, mem_salt, mem_state;
static int new_keys;
static struct fmt_main *self;

static cl_kernel pbkdf2_init, pbkdf2_loop, pbkdf2_final;

#define cracked_size (sizeof(*cracked) * global_work_size * ocl_v_width)

/*
 * HASH_LOOPS is ideally made by factors of (iteration count - 1) and should
 * be chosen for a kernel duration of not more than 200 ms
 */
#define HASH_LOOPS	(3 * 271)
#define ITERATIONS	100000 /* Just for auto tune */
#define LOOP_COUNT	(((currentsalt.iterations - 1 + HASH_LOOPS - 1)) / HASH_LOOPS)
#define STEP		0
#define SEED		128

static const char * warn[] = { "P xfer: " , ", init: " , ", loop: " , ", final: ", ", res xfer: " };

static int split_events[] = { 2, -1, -1 };

//This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"

/* ------- Helper functions ------- */
/* Upper bound on the local work-group size usable by all three kernels. */
static size_t get_task_max_work_group_size()
{
	size_t s;

	s = autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_init);
	s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_loop));
	s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_final));
	return s;
}

#if 0
struct fmt_main *me;
#endif

/* Allocate host buffers and device buffers for `gws` work items and bind the
 * buffers to the kernel arguments. Called by the auto-tuner. */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	gws *= ocl_v_width;
	key_buf_size = 64 * gws;

	/// Allocate memory
	inbuffer = mem_calloc(1, key_buf_size);
	output = mem_alloc(sizeof(pbkdf2_out) * gws);
	cracked = mem_calloc(1, cracked_size);

	mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, key_buf_size, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem in");
	mem_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, sizeof(pbkdf2_salt), NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem setting");
	mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, sizeof(pbkdf2_out) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem out");
	mem_state = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(pbkdf2_state) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem_state");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 1, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_loop, 0, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 0, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");
}

/* Free everything create_clobj() allocated; `cracked` doubles as the
 * "allocated" flag so this is safe to call more than once. */
static void release_clobj(void)
{
	if (cracked) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_salt), "Release mem setting");
		HANDLE_CLERROR(clReleaseMemObject(mem_state), "Release mem state");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

		MEM_FREE(inbuffer);
		MEM_FREE(output);
		MEM_FREE(cracked);
	}
}

/* Format teardown: release buffers, kernels and the program. */
static void done(void)
{
	if (autotuned) {
		release_clobj();

		HANDLE_CLERROR(clReleaseKernel(pbkdf2_init), "Release kernel");
		HANDLE_CLERROR(clReleaseKernel(pbkdf2_loop), "Release kernel");
		HANDLE_CLERROR(clReleaseKernel(pbkdf2_final), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");

		autotuned--;
	}
}

/* Format init: pick a vector width for the device and advertise it in the
 * algorithm name when running vectorized. */
static void init(struct fmt_main *_self)
{
	static char valgo[sizeof(ALGORITHM_NAME) + 8] = "";

	self = _self;
	opencl_prepare_dev(gpu_id);

	/* VLIW5 does better with just 2x vectors due to GPR pressure */
	if (!options.v_width && amd_vliw5(device_info[gpu_id]))
		ocl_v_width = 2;
	else
		ocl_v_width = opencl_get_vector_width(gpu_id, sizeof(cl_int));

	if (ocl_v_width > 1) {
		/* Run vectorized kernel */
		snprintf(valgo, sizeof(valgo), OCL_ALGORITHM_NAME " %ux" CPU_ALGORITHM_NAME, ocl_v_width);
		self->params.algorithm_name = valgo;
	}
}

/* Build the PBKDF2-HMAC-SHA1 kernels (first call only) and run auto-tuning. */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		snprintf(build_opts, sizeof(build_opts),
		         "-DHASH_LOOPS=%u -DOUTLEN=%u "
		         "-DPLAINTEXT_LENGTH=%u -DV_WIDTH=%u",
		         HASH_LOOPS, OUTLEN, PLAINTEXT_LENGTH, ocl_v_width);
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_kernel.cl", gpu_id, build_opts);

		pbkdf2_init = clCreateKernel(program[gpu_id], "pbkdf2_init", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");
		crypt_kernel = pbkdf2_loop = clCreateKernel(program[gpu_id], "pbkdf2_loop", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");
		pbkdf2_final = clCreateKernel(program[gpu_id], "pbkdf2_final", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");

		//Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 2*HASH_LOOPS, split_events, warn, 2, self, create_clobj, release_clobj, ocl_v_width * sizeof(pbkdf2_state), 0, db);

		//Auto tune execution from shared/included code.
		autotune_run(self, 2 * (ITERATIONS - 1) + 4, 0, (cpu(device_info[gpu_id]) ? 1000000000 : 10000000000ULL));
	}
}

/* Copy the current salt into the device-side PBKDF2 salt structure. */
static void set_salt(void *salt)
{
	fctx = (iwork_common_custom_salt*)salt;

	memcpy((char*)currentsalt.salt, fctx->salt, fctx->salt_length);
	currentsalt.length = fctx->salt_length;
	currentsalt.iterations = fctx->iterations;
	currentsalt.outlen = 16; // AES 128-bit key only

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE, 0, sizeof(pbkdf2_salt), &currentsalt, 0, NULL, NULL), "Copy salt to gpu");
}

static void clear_keys(void)
{
	memset(inbuffer, 0, key_buf_size);
}

/* Scatter a candidate password into the transposed/vectorized input layout
 * described by GETPOS(). */
static void iwork_set_key(char *key, int index)
{
	int i;
	int length = strlen(key);

	for (i = 0; i < length; i++)
		((char*)inbuffer)[GETPOS(i, index)] = key[i];

	new_keys = 1;
}

/* Gather a candidate password back out of the GETPOS() layout. */
static char *get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	int i = 0;

	while (i < PLAINTEXT_LENGTH && (ret[i] = ((char*)inbuffer)[GETPOS(i, index)]))
		i++;
	ret[i] = 0;

	return ret;
}

/* Derive the AES key on the GPU (PBKDF2-HMAC-SHA1) for every candidate, then
 * do the AES decrypt + verification on the CPU. Returns the candidate count
 * as required by the formats interface. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int i, j, index;
	size_t scalar_gws;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER_VW(count, local_work_size);
	scalar_gws = global_work_size * ocl_v_width;

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	/// Copy data to gpu
	if (ocl_autotune_running || new_keys) {
		BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, key_buf_size, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu");
		new_keys = 0;
	}

	/// Run kernels
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_init, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run initial kernel");

	for (j = 0; j < (ocl_autotune_running ? 1 : (currentsalt.outlen + 19) / 20); j++) {
		for (i = 0; i < (ocl_autotune_running ? 1 : LOOP_COUNT); i++) {
			BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_loop, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[2]), "Run loop kernel");
			BENCH_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel");
			opencl_process_event();
		}

		BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_final, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[3]), "Run intermediate kernel");
	}

	/// Read the result back
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, sizeof(pbkdf2_out) * scalar_gws, output, 0, NULL, multi_profilingEvent[4]), "Copy result back");

	if (!ocl_autotune_running) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
		for (index = 0; index < count; index++)
			if (iwork_common_decrypt(fctx, (unsigned char*)output[index].dk, fctx->iv, fctx->blob)) {
				cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
				any_cracked |= 1;
			}
	}

	return count;
}

static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

/* The CPU-side decrypt already fully verifies the candidate. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

struct fmt_main fmt_opencl_iwork = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",
		},
		{ FORMAT_TAG },
		iwork_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		iwork_common_valid,
		fmt_default_split,
		fmt_default_binary,
		iwork_common_get_salt,
		{
			iwork_common_iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		iwork_set_key,
		get_key,
		clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
memory.c
/**
 * Convert an interleaved image buffer (first three channels read as R, G, B)
 * to 8-bit grayscale using the BT.709 luma weights
 * (0.2126*R + 0.7152*G + 0.0722*B); the double result is truncated on store.
 *
 * The pixel range is split into `threads` contiguous chunks so the OpenMP
 * loop parallelizes over chunks; the last chunk absorbs the remainder when
 * (width*height) is not divisible by `threads`.
 *
 * FIX(review): the original divided by `threads` unconditionally, which is
 * undefined behavior (division by zero) for threads <= 0; the count is now
 * clamped to at least 1. Behavior is unchanged for all valid inputs.
 *
 * @param img      input buffer, width*height pixels of `channels` bytes each
 *                 (channels must be >= 3)
 * @param width    image width in pixels
 * @param height   image height in pixels
 * @param channels bytes per input pixel
 * @param threads  number of work chunks (clamped to >= 1)
 * @param result   output buffer, width*height bytes
 */
void convert_memory(unsigned char *img, int width, int height, int channels, int threads, unsigned char *result)
{
    int total = width * height;

    /* Guard against a non-positive chunk count instead of dividing by zero. */
    if (threads < 1)
        threads = 1;

    int pixel_per_thread = total / threads;

#pragma omp parallel for
    for (int thread = 0; thread < threads; thread++) {
        /* The last chunk runs to the end, absorbing total % threads pixels. */
        int end = (thread + 1 == threads) ? total : pixel_per_thread * (thread + 1);

        for (int i = pixel_per_thread * thread; i < end; i++) {
            result[i] = 0.2126 * img[(i * channels)]       // red
                      + 0.7152 * img[(i * channels) + 1]   // green
                      + 0.0722 * img[(i * channels) + 2];  // blue
        }
    }
}
Mrpt.h
#ifndef CPP_MRPT_H_
#define CPP_MRPT_H_

#include <algorithm>
#include <functional>
#include <numeric>
#include <random>
#include <string>
#include <vector>
#include <cmath>

#include <Eigen/Dense>
#include <Eigen/SparseCore>

// NOTE(review): `using namespace` in a header leaks Eigen's names into every
// includer; consider qualifying instead.
using namespace Eigen;

/**
 * Mrpt: approximate nearest-neighbor index built from multiple random
 * projection trees (median split). The data matrix is column-major:
 * one column per sample, one row per dimension.
 */
class Mrpt {
 public:
    /**
     * The constructor of the index. The constructor does not actually build
     * the index but that is done by the function 'grow' which has to be called
     * before queries can be made.
     * @param X_ - Pointer to the Eigen::Map which refers to the data matrix.
     */
    Mrpt(const Map<const MatrixXf> *X_) : X(X_), n_samples(X_->cols()), dim(X_->rows()) {}

    ~Mrpt() {}

    /**
     * The function whose call starts the actual index construction. Initializes
     * arrays to store the tree structures and computes all the projections needed
     * later. Then repeatedly calls method grow_subtree that builds a single RP-tree.
     * @param n_trees_ - The number of trees to be used in the index.
     * @param depth_ - The depth of the trees.
     * @param density_ - Expected ratio of non-zero components in a projection matrix.
     * @param seed - A seed given to a rng when generating random vectors;
     * a default value 0 initializes the rng randomly with rd()
     */
    void grow(int n_trees_, int depth_, float density_, int seed = 0) {
        n_trees = n_trees_;
        depth = depth_;
        density = density_;
        n_pool = n_trees_ * depth_;
        n_array = 1 << (depth_ + 1);  // full binary tree stored as an array

        density < 1 ? build_sparse_random_matrix(seed) : build_dense_random_matrix(seed);
        split_points = MatrixXf(n_array, n_trees);
        tree_leaves = std::vector<std::vector<int>>(n_trees);

        count_first_leaf_indices();

        // Each tree is independent, so trees are grown in parallel.
        #pragma omp parallel for
        for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
            MatrixXf tree_projections;

            // depth projections per tree, precomputed for all samples at once
            if (density < 1)
                tree_projections.noalias() = sparse_random_matrix.middleRows(n_tree * depth, depth) * *X;
            else
                tree_projections.noalias() = dense_random_matrix.middleRows(n_tree * depth, depth) * *X;

            tree_leaves[n_tree] = std::vector<int>(n_samples);
            std::vector<int> &indices = tree_leaves[n_tree];
            std::iota(indices.begin(), indices.end(), 0);

            grow_subtree(indices.begin(), indices.end(), 0, 0, n_tree, tree_projections);
        }
    }

    /**
     * This function finds the k approximate nearest neighbors of the query object
     * q. The accuracy of the query depends on both the parameters used for index
     * construction and additional parameters given to this function. This
     * function implements two tricks to improve performance. The voting trick
     * interprets each index object in leaves returned by tree traversals as votes,
     * and only performs the final linear search with the 'elect' most voted
     * objects.
     * @param q - The query object whose neighbors the function finds
     * @param k - The number of neighbors the user wants the function to return
     * @param votes_required - The number of votes required for an object to be included in the linear search step
     * @param out - The output buffer for the indices of the k approximate nearest neighbors
     * @param out_distances - Output buffer for distances of the k approximate nearest neighbors (optional parameter)
     * @return
     */
    void query(const Map<VectorXf> &q, int k, int votes_required, int *out, float *out_distances = nullptr) const {
        VectorXf projected_query(n_pool);
        if (density < 1)
            projected_query.noalias() = sparse_random_matrix * q;
        else
            projected_query.noalias() = dense_random_matrix * q;

        std::vector<int> found_leaves(n_trees);

        /*
         * The following loops over all trees, and routes the query to exactly one
         * leaf in each.
         */
        #pragma omp parallel for
        for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
            int idx_tree = 0;
            for (int d = 0; d < depth; ++d) {
                const int j = n_tree * depth + d;
                const int idx_left = 2 * idx_tree + 1;
                const int idx_right = idx_left + 1;
                const float split_point = split_points(idx_tree, n_tree);
                if (projected_query(j) <= split_point) {
                    idx_tree = idx_left;
                } else {
                    idx_tree = idx_right;
                }
            }
            // convert array position of the reached node into a leaf ordinal
            found_leaves[n_tree] = idx_tree - (1 << depth) + 1;
        }

        int n_elected = 0, max_leaf_size = n_samples / (1 << depth) + 1;
        // NOTE(review): `elected` is sized for the voting pass only; the
        // fallback below can in principle elect up to n_samples points --
        // verify n_trees * max_leaf_size is always sufficient.
        VectorXi elected(n_trees * max_leaf_size);
        VectorXi votes = VectorXi::Zero(n_samples);

        // count votes
        for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
            int leaf_begin = leaf_first_indices[found_leaves[n_tree]];
            int leaf_end = leaf_first_indices[found_leaves[n_tree] + 1];
            const std::vector<int> &indices = tree_leaves[n_tree];
            for (int i = leaf_begin; i < leaf_end; ++i) {
                int idx = indices[i];
                if (++votes(idx) == votes_required)
                    elected(n_elected++) = idx;
            }
        }

        if (n_elected < k) {
            /*
             * If not enough samples had at least votes_required
             * votes, find the maximum amount of votes needed such
             * that the final search set size has at least k samples
             */
            VectorXf::Index max_index;
            votes.maxCoeff(&max_index);
            int max_votes = votes(max_index);
            VectorXi vote_count = VectorXi::Zero(max_votes + 1);
            for (int i = 0; i < n_samples; ++i)
                vote_count(votes(i))++;
            // walk the vote histogram downwards until >= k points qualify
            for (int would_elect = 0; max_votes; --max_votes) {
                would_elect += vote_count(max_votes);
                if (would_elect >= k) break;
            }
            for (int i = 0; i < n_samples; ++i) {
                if (votes(i) >= max_votes && votes(i) < votes_required)
                    elected(n_elected++) = i;
            }
        }

        exact_knn(q, k, elected, n_elected, out, out_distances);
    }

    /**
     * find k nearest neighbors from data for the query point
     * @param q - query point as a vector
     * @param k - number of neighbors searched for
     * @param indices - indices of the points in the original matrix where the search is made
     * @param out - output buffer for the indices of the k approximate nearest neighbors
     * @param out_distances - output buffer for distances of the k approximate nearest neighbors (optional parameter)
     * @return
     */
    void exact_knn(const Map<VectorXf> &q, int k, const VectorXi &indices, int n_elected, int *out, float *out_distances = nullptr) const {
        // NOTE(review): assumes k <= n_elected; partial_sort below has
        // undefined behavior otherwise -- TODO confirm callers guarantee it.
        VectorXf distances(n_elected);

        #pragma omp parallel for
        for (int i = 0; i < n_elected; ++i)
            distances(i) = (X->col(indices(i)) - q).squaredNorm();

        if (k == 1) {
            MatrixXf::Index index;
            distances.minCoeff(&index);
            out[0] = indices(index);

            if(out_distances) {
                out_distances[0] = std::sqrt(distances(index));
            }

            return;
        }

        // sort only the k smallest by squared distance
        VectorXi idx(n_elected);
        std::iota(idx.data(), idx.data() + n_elected, 0);
        std::partial_sort(idx.data(), idx.data() + k, idx.data() + n_elected,
                          [&distances](int i1, int i2) {return distances(i1) < distances(i2);});

        for (int i = 0; i < k; ++i)
            out[i] = indices(idx(i));

        if(out_distances) {
            for(int i = 0; i < k; ++i)
                out_distances[i] = std::sqrt(distances(idx(i)));
        }
    }

    /**
     * Saves the index to a file.
     * @param path - Filepath to the output file.
     * @return True if saving succeeded, false otherwise.
     */
    bool save(const char *path) const {
        FILE *fd;
        if ((fd = fopen(path, "wb")) == NULL)
            return false;

        // header: tree count, depth and density, then the split points
        fwrite(&n_trees, sizeof(int), 1, fd);
        fwrite(&depth, sizeof(int), 1, fd);
        fwrite(&density, sizeof(float), 1, fd);

        fwrite(split_points.data(), sizeof(float), n_array * n_trees, fd);

        // save tree leaves
        for (int i = 0; i < n_trees; ++i) {
            int sz = tree_leaves[i].size();
            fwrite(&sz, sizeof(int), 1, fd);
            fwrite(&tree_leaves[i][0], sizeof(int), sz, fd);
        }

        // save random matrix
        if (density < 1) {
            // sparse matrix stored as (row, col, value) triplets
            int non_zeros = sparse_random_matrix.nonZeros();
            fwrite(&non_zeros, sizeof(int), 1, fd);
            for (int k = 0; k < sparse_random_matrix.outerSize(); ++k) {
                for (SparseMatrix<float, RowMajor>::InnerIterator it(sparse_random_matrix, k); it; ++it) {
                    float val = it.value();
                    int row = it.row(), col = it.col();
                    fwrite(&row, sizeof(int), 1, fd);
                    fwrite(&col, sizeof(int), 1, fd);
                    fwrite(&val, sizeof(float), 1, fd);
                }
            }
        } else {
            fwrite(dense_random_matrix.data(), sizeof(float), n_pool * dim, fd);
        }

        fclose(fd);
        return true;
    }

    /**
     * Loads the index from a file.
     * @param path - Filepath to the index file.
     * @return True if loading succeeded, false otherwise.
     * NOTE(review): fread() return values are ignored throughout, so a
     * truncated or corrupt file is read without error detection.
     */
    bool load(const char *path) {
        FILE *fd;
        if ((fd = fopen(path, "rb")) == NULL)
            return false;

        fread(&n_trees, sizeof(int), 1, fd);
        fread(&depth, sizeof(int), 1, fd);
        fread(&density, sizeof(float), 1, fd);

        n_pool = n_trees * depth;
        n_array = 1 << (depth + 1);

        count_first_leaf_indices();

        split_points = MatrixXf(n_array, n_trees);
        fread(split_points.data(), sizeof(float), n_array * n_trees, fd);

        // load tree leaves
        tree_leaves = std::vector<std::vector<int>>(n_trees);
        for (int i = 0; i < n_trees; ++i) {
            int sz;
            fread(&sz, sizeof(int), 1, fd);
            std::vector<int> leaves(sz);
            fread(&leaves[0], sizeof(int), sz, fd);
            tree_leaves[i] = leaves;
        }

        // load random matrix
        if (density < 1) {
            int non_zeros;
            fread(&non_zeros, sizeof(int), 1, fd);

            sparse_random_matrix = SparseMatrix<float>(n_pool, dim);
            std::vector<Triplet<float>> triplets;
            for (int k = 0; k < non_zeros; ++k) {
                int row, col;
                float val;
                fread(&row, sizeof(int), 1, fd);
                fread(&col, sizeof(int), 1, fd);
                fread(&val, sizeof(float), 1, fd);
                triplets.push_back(Triplet<float>(row, col, val));
            }

            sparse_random_matrix.setFromTriplets(triplets.begin(), triplets.end());
            sparse_random_matrix.makeCompressed();
        } else {
            dense_random_matrix = Matrix<float, Dynamic, Dynamic, RowMajor>(n_pool, dim);
            fread(dense_random_matrix.data(), sizeof(float), n_pool * dim, fd);
        }

        fclose(fd);
        return true;
    }

    /**
     * Accessor for split points of trees (for testing purposes)
     * @param tree - index of tree in (0, ... , T-1)
     * @param index - the index of branch in (0, ... , (2^depth) - 1):
     * 0 = root
     * 1 = first branch of first level
     * 2 = second branch of first level
     * 3 = first branch of second level etc.
     * @return split point of index:th branch of tree:th tree
     */
    float get_split_point(int tree, int index) const {
        return split_points(index, tree);
    }

    /**
     * Accessor for point stored in leaves of trees (for testing purposes)
     * @param tree - index of tree in (0, ... T-1)
     * @param leaf - index of leaf in (0, ... , 2^depth)
     * @param index - index of a data point in a leaf
     * @return index of index:th data point in leaf:th leaf of tree:th tree
     */
    int get_leaf_point(int tree, int leaf, int index) const {
        int leaf_begin = leaf_first_indices[leaf];
        return tree_leaves[tree][leaf_begin + index];
    }

    /**
     * Accessor for the number of points in a leaf of a tree (for test purposes)
     * @param tree - index of tree in (0, ... T-1)
     * @param leaf - index of leaf in (0, ... , 2^depth)
     * @return - number of data points in leaf:th leaf of tree:th tree
     */
    int get_leaf_size(int tree, int leaf) const {
        return leaf_first_indices[leaf + 1] - leaf_first_indices[leaf];
    }

    /**
     * @return - number of trees in the index
     */
    int get_n_trees() const {
        return split_points.cols();
    }

    /**
     * @return - depth of trees of index
     */
    int get_depth() const {
        // depth is recovered from the projection-matrix shape: rows = trees * depth
        if(sparse_random_matrix.rows() > 0) {
            return sparse_random_matrix.rows() / get_n_trees();
        } else {
            return dense_random_matrix.rows() / get_n_trees();
        }
    }

    /**
     * @return - number of points of the data set from which the index is built
     */
    int get_n_points() const {
        return n_samples;
    }

 private:
    /**
     * Builds a single random projection tree. The tree is constructed by recursively
     * projecting the data on a random vector and splitting into two by the median.
     * @param begin - iterator to the index of the first data point of this branch
     * @param end - iterator to the index of the last data point of this branch
     * @param tree_level - The level in tree where the recursion is at
     * @param i - The index within the tree where we are at
     * @param n_tree - The index of the tree within the index
     * @param tree_projections - Precalculated projection values for the current tree
     */
    void grow_subtree(std::vector<int>::iterator begin, std::vector<int>::iterator end, int tree_level, int i, int n_tree, const MatrixXf &tree_projections) {
        int n = end - begin;
        int idx_left = 2 * i + 1;
        int idx_right = idx_left + 1;

        if (tree_level == depth) return;

        // partition around the median projection value at this level
        std::nth_element(begin, begin + n/2, end, [&tree_projections, tree_level] (int i1, int i2) {
            return tree_projections(tree_level, i1) < tree_projections(tree_level, i2);
        });
        auto mid = end - n/2;

        if(n % 2) {
            // odd count: split at the median element itself
            split_points(i, n_tree) = tree_projections(tree_level, *(mid - 1));
        } else {
            // even count: split midway between the two middle values
            auto left_it = std::max_element(begin, mid, [&tree_projections, tree_level] (int i1, int i2) {
                return tree_projections(tree_level, i1) < tree_projections(tree_level, i2);
            });
            split_points(i, n_tree) = (tree_projections(tree_level, *mid) + tree_projections(tree_level, *left_it)) / 2.0;
        }

        grow_subtree(begin, mid, tree_level + 1, idx_left, n_tree, tree_projections);
        grow_subtree(mid, end, tree_level + 1, idx_right, n_tree, tree_projections);
    }

    /**
     * Builds a random sparse matrix for use in random projection. The components of
     * the matrix are drawn from the distribution
     *
     * 0 w.p. 1 - a
     * N(0, 1) w.p. a
     *
     * where a = density.
     *
     * @param seed - A seed given to a rng when generating random vectors;
     * a default value 0 initializes the rng randomly with rd()
     */
    void build_sparse_random_matrix(int seed = 0) {
        sparse_random_matrix = SparseMatrix<float, RowMajor>(n_pool, dim);

        std::random_device rd;
        int s = seed ? seed : rd();
        std::mt19937 gen(s);
        std::uniform_real_distribution<float> uni_dist(0, 1);
        std::normal_distribution<float> norm_dist(0, 1);

        std::vector<Triplet<float>> triplets;
        for (int j = 0; j < n_pool; ++j) {
            for (int i = 0; i < dim; ++i) {
                if (uni_dist(gen) > density) continue;
                triplets.push_back(Triplet<float>(j, i, norm_dist(gen)));
            }
        }

        sparse_random_matrix.setFromTriplets(triplets.begin(), triplets.end());
        sparse_random_matrix.makeCompressed();
    }

    /*
     * Builds a random dense matrix for use in random projection. The components of
     * the matrix are drawn from the standard normal distribution.
     * @param seed - A seed given to a rng when generating random vectors;
     * a default value 0 initializes the rng randomly with rd()
     */
    void build_dense_random_matrix(int seed = 0) {
        dense_random_matrix = Matrix<float, Dynamic, Dynamic, RowMajor>(n_pool, dim);

        std::random_device rd;
        int s = seed ? seed : rd();
        std::mt19937 gen(s);
        std::normal_distribution<float> normal_dist(0, 1);

        std::generate(dense_random_matrix.data(), dense_random_matrix.data() + n_pool * dim,
                      [&normal_dist, &gen] { return normal_dist(gen); });
    }

    /**
     * Computes the leaf sizes of a tree assuming a median split and that
     * when the number points is odd, the extra point is always assigned to
     * to the left branch.
     * @param n - number data points
     * @param level - current level of the tree
     * @param tree_depth - depth of the whole tree
     * @param out_leaf_sizes - vector for the output; after completing
     * the function is a vector of length n containing the leaf sizes
     */
    void count_leaf_sizes(int n, int level, int tree_depth, std::vector<int> &out_leaf_sizes) {
        if(level == tree_depth) {
            out_leaf_sizes.push_back(n);
            return;
        }

        count_leaf_sizes(n - n/2, level + 1, tree_depth, out_leaf_sizes);
        count_leaf_sizes(n/2, level + 1, tree_depth, out_leaf_sizes);
    }

    /**
     * Computes indices of the first elements of leaves in a vector containing
     * all the leaves of a tree concatenated. Assumes that median split is used
     * and when the number points is odd, the extra point is always assigned to
     * to the left branch.
     */
    void count_first_leaf_indices() {
        std::vector<int> leaf_sizes;
        count_leaf_sizes(n_samples, 0, depth, leaf_sizes);
        // prefix sums: leaf_first_indices[i] is where leaf i starts
        leaf_first_indices = std::vector<int>(leaf_sizes.size() + 1);
        leaf_first_indices[0] = 0;
        for(int i = 0; i < leaf_sizes.size(); ++i)
            leaf_first_indices[i+1] = leaf_first_indices[i] + leaf_sizes[i];
    }

    const Map<const MatrixXf> *X; // the data matrix
    MatrixXf split_points; // all split points in all trees
    std::vector<std::vector<int>> tree_leaves; // contains all leaves of all trees
    Matrix<float, Dynamic, Dynamic, RowMajor> dense_random_matrix; // random vectors needed for all the RP-trees
    SparseMatrix<float, RowMajor> sparse_random_matrix; // random vectors needed for all the RP-trees
    std::vector<int> leaf_first_indices; // first indices of each leaf of tree in tree_leaves

    const int n_samples; // sample size of data
    const int dim; // dimension of data
    int n_trees; // number of RP-trees
    int depth; // depth of an RP-tree with median split
    float density; // expected ratio of non-zero components in a projection matrix
    int n_pool; // amount of random vectors needed for all the RP-trees
    int n_array; // length of the one RP-tree as array
};

#endif // CPP_MRPT_H_
List.h
/** 2017 Neil Edelman, distributed under the terms of the MIT License; see readme.txt, or \url{ https://opensource.org/licenses/MIT }. {<T>List} is a doubly-linked-list of {<T>Link}, of which data of type, {<T>}, must be set using {LIST_TYPE}. This is an abstract data structure requiring {<T>Link} storage, and can possibly store this as a sub-structure of larger, possibly polymorphic data-type. This data-structure is closed; that is, given a valid pointer to an element, one can determine all other pointers, (the elements and the list itself,) in {O(n)}. This is useful as a single-source of information, and simplifies traversal, but requires the linking of two nodes in an empty list; statically un-initialised data, (zero-filled,) will crash, see \see{<T>ListClear}. Supports one to four multiply-linked-lists, (different orders.) The preprocessor macros are all undefined at the end of the file for convenience. @param LIST_NAME, LIST_TYPE The name that literally becomes {<T>}, and a valid type associated therewith, accessible to the compiler at the time of inclusion; should be conformant to naming and to the maximum available length of identifiers. Must each be present before including. @param LIST_COMPARATOR or LIST_U[A-D]_NAME, LIST_U[A-D]_COMPARATOR Each {LIST_U[A-D]_NAME} literally becomes, {<U>}, an order, and optional comparator, {LIST_U[A-D]_COMPARATOR}, an equivalence relation function implementing {<T>Comparator}. Not defining this implies one anonymous order which one can set a comparator using {LIST_COMPARATOR}; {<U>} will be an empty string, in this case. @param LIST_TO_STRING Optional print function implementing {<T>ToString}; makes available \see{<T>List<U>ToString}. @param LIST_OPENMP Tries to parallelise using {OpenMP}, \url{ http://www.openmp.org/ }. This is limited to some, usually multi-order, functions. @param LIST_TEST Unit testing framework using {<T>ListTest}, included in a separate header, {../test/ListTest.h}. 
Must be defined equal to a (random) filler, satisfying {<T>Action}. If {NDEBUG} is not defined, turns on {assert} private function integrity testing. Requires {LIST_TO_STRING}. @title List.h @author Neil @std C89/90 @version 2018-04 {<T>ListNode} has been shortened to {<T>Link}, thus potential namespace violations doubled. Two dynamic memory allocations have been collapsed into one by making it a non-pointer at the cost of readability. These changes are good for more complex data structures #including list. @since 2018-02 Eliminated the need for unnecessarily {<T>List}. Now one must initialise static variables with {<T>ListClear}. Eliminated {LIST_STATIC_SORT}. 2017-12 Type information on backing. 2017-10 Anonymous orders. 2017-07 Made migrate simpler. 2017-06 Split Add into Push and Unshift. 2017-05 Separated from backing. @fixme {GCC}: {#pragma GCC diagnostic ignored "-Wconversion"}; libc 4.2 {assert} warnings on {LIST_TEST}. @fixme {MSVC}: {#pragma warning(disable: x)} where {x} is: 4464 contains '..' uhm, thanks?; 4706 not {Java}; 4710, 4711 inlined info; 4820 padding info; 4996 not {C++11}. @fixme {clang}: {#pragma clang diagnostic ignored "-Wx"} where {x} is: {padded}; {documentation}; {documentation-unknown-command} it's not quite {clang-tags}; 3.8 {disabled-macro-expansion} on {toupper} in {LIST_TEST}. @fixme Non-const void pointers in {<T>List<U>BiAction} are not effective; have an interface. While we're at it, {<T>LinkMigrate} should be an interface. Everything should be an interface. */ /* 2017-05-12 tested with: gcc version 4.2.1 (Apple Inc. 
build 5666) (dot 3) Apple clang version 1.7 (tags/Apple/clang-77) (based on LLVM 2.9svn) gcc version 4.9.2 (Debian 4.9.2-10) Microsoft Visual Studio Enterprise 2015 Version 14.0.25424.00 Update 3 Borland 10.1 Embarcadero C++ 7.20 for Win32 MinGW gcc version 4.9.3 (GCC) Win32 gcc version 5.4.0 20160609 (Ubuntu 5.4.0-6ubuntu1~16.04.4) clang version 3.8.0-2ubuntu4 (tags/RELEASE_380/final) */ /* Original #include in the user's C file, and not from calling recursively; all "LIST_*" names are assumed to be reserved. */ #if !defined(LIST_U_NAME) /* <-- !LIST_U_NAME */ #include <stddef.h> /* ptrdiff_t offset_of */ #include <assert.h> /* assert */ #ifdef LIST_TO_STRING /* <-- print */ #include <stdio.h> /* sprintf */ #include <string.h> /* strlen */ #endif /* print --> */ /* Check defines; {[A, D]} is just arbitrary; more could be added. */ #ifndef LIST_NAME #error List generic LIST_NAME undefined. #endif #ifndef LIST_TYPE #error List generic LIST_TYPE undefined. #endif #if defined(LIST_UA_COMPARATOR) && !defined(LIST_UA_NAME) #error List: LIST_UA_COMPARATOR requires LIST_UA_NAME. #endif #if defined(LIST_UB_COMPARATOR) && !defined(LIST_UB_NAME) #error List: LIST_UB_COMPARATOR requires LIST_UB_NAME. #endif #if defined(LIST_UC_COMPARATOR) && !defined(LIST_UC_NAME) #error List: LIST_UC_COMPARATOR requires LIST_UC_NAME. #endif #if defined(LIST_UD_COMPARATOR) && !defined(LIST_UD_NAME) #error List: LIST_UD_COMPARATOR requires LIST_UD_NAME. #endif /* Anonymous one-order implicit macro into {UA} for convenience and brevity. */ #if !defined(LIST_UA_NAME) && !defined(LIST_UB_NAME) \ && !defined(LIST_UC_NAME) && !defined(LIST_UD_NAME) /* <-- anon */ #define LIST_U_ANONYMOUS #define LIST_UA_NAME #ifdef LIST_COMPARATOR #define LIST_UA_COMPARATOR LIST_COMPARATOR #endif #else /* anon --><-- !anon */ #ifdef LIST_COMPARATOR #error List: LIST_COMPARATOR can only be anonymous; use LIST_U[A-D]_COMPARATOR. 
#endif #endif /* !anon --> */ #if defined(LIST_TEST) && !defined(LIST_TO_STRING) /* <-- error */ #error LIST_TEST requires LIST_TO_STRING. #endif /* error --> */ #if !defined(LIST_TEST) && !defined(NDEBUG) /* <-- !assert */ #define LIST_NDEBUG #define NDEBUG #endif /* !assert --> */ #if defined(LIST_UA_COMPARATOR) || defined(LIST_UB_COMPARATOR) \ || defined(LIST_UC_COMPARATOR) || defined(LIST_UD_COMPARATOR) /* <-- some */ #define LIST_SOME_COMPARATOR #endif /* some --> */ /* Generics using the preprocessor; \url{ http://stackoverflow.com/questions/16522341/pseudo-generics-in-c }. */ #ifdef CAT #undef CAT #endif #ifdef CAT_ #undef CAT_ #endif #ifdef PCAT #undef PCAT #endif #ifdef PCAT_ #undef PCAT_ #endif #ifdef T #undef T #endif #ifdef T_ #undef T_ #endif #ifdef PT_ #undef PT_ #endif #define CAT_(x, y) x ## y #define CAT(x, y) CAT_(x, y) #define PCAT_(x, y) x ## _ ## y #define PCAT(x, y) PCAT_(x, y) #define T_(thing) CAT(LIST_NAME, thing) #define PT_(thing) PCAT(list, PCAT(LIST_NAME, thing)) /* {private <T>} */ /* Troubles with this line? check to ensure that {LIST_TYPE} is a valid type, whose definition is placed above {#include "List.h"}. */ typedef LIST_TYPE PT_(Type); #define T PT_(Type) /* [A, D] */ #ifdef UA_ #undef UA_ #undef T_UA_ #undef PT_UA_ #endif #ifdef UB_ #undef UB_ #undef T_UB_ #undef PT_UB_ #endif #ifdef UC_ #undef UC_ #undef T_UC_ #undef PT_UC_ #endif #ifdef UD_ #undef UD_ #undef T_UD_ #undef PT_UD_ #endif /* Data exclusively, public f'ns, and private f'ns. */ #ifdef LIST_U_ANONYMOUS /* <-- anon: We are using C89; "Empty macro arguments were standardized in C99," workaround. 
*/ #define UA_(thing) PCAT(anonymous, thing) #define T_UA_(thing1, thing2) CAT(CAT(LIST_NAME, thing1), thing2) #define PT_UA_(thing1, thing2) PCAT(list, PCAT(PCAT(LIST_NAME, thing1), \ CAT(_, thing2))) #else /* anon --><-- !anon */ #ifdef LIST_UA_NAME #define UA_(thing) PCAT(LIST_UA_NAME, thing) #define T_UA_(thing1, thing2) CAT(CAT(LIST_NAME, thing1), \ CAT(LIST_UA_NAME, thing2)) #define PT_UA_(thing1, thing2) PCAT(list, PCAT(PCAT(LIST_NAME, thing1), \ PCAT(LIST_UA_NAME, thing2))) #endif #ifdef LIST_UB_NAME #define UB_(thing) PCAT(LIST_UB_NAME, thing) #define T_UB_(thing1, thing2) CAT(CAT(LIST_NAME, thing1), \ CAT(LIST_UB_NAME, thing2)) #define PT_UB_(thing1, thing2) PCAT(list, PCAT(PCAT(LIST_NAME, thing1), \ PCAT(LIST_UB_NAME, thing2))) #endif #ifdef LIST_UC_NAME #define UC_(thing) PCAT(LIST_UC_NAME, thing) #define T_UC_(thing1, thing2) CAT(CAT(LIST_NAME, thing1), \ CAT(LIST_UC_NAME, thing2)) #define PT_UC_(thing1, thing2) PCAT(list, PCAT(PCAT(LIST_NAME, thing1), \ PCAT(LIST_UC_NAME, thing2))) #endif #ifdef LIST_UD_NAME #define UD_(thing) PCAT(LIST_UD_NAME, thing) #define T_UD_(thing1, thing2) CAT(CAT(LIST_NAME, thing1), \ CAT(LIST_UD_NAME, thing2)) #define PT_UD_(thing1, thing2) PCAT(list, PCAT(PCAT(LIST_NAME, thing1), \ PCAT(LIST_UD_NAME, thing2))) #endif #endif /* !anon --> */ /* Constants across multiple includes in the same translation unit. */ #ifndef LIST_H /* <-- LIST_H */ #define LIST_H /* \see{combine_sets} operations bit-vector; dummy {LO_?}: {clang -Weverything} complains that it is not closed under union, a very valid point. */ enum ListOperation { LO_SUBTRACTION_AB = 1, LO_SUBTRACTION_BA = 2, LO_A, LO_INTERSECTION = 4, LO_B, LO_C, LO_D, LO_DEFAULT_A = 8, LO_E, LO_F, LO_G, LO_H, LO_I, LO_J, LO_K, LO_DEFAULT_B = 16, LO_L, LO_M, LO_N, LO_O, LO_P, LO_Q, LO_R, LO_S, LO_T, LO_U, LO_V, LO_W, LO_X, LO_Y, LO_Z }; /* Use this to statically initialise. How many orders are the [2-4]. 
This is an initialisation constant expression, eg, {struct <T>List list = LIST_EMPTY(list);} for one order. */ #define LIST_EMPTY(l) { { 0, &(l).tail }, { &(l).head, 0 } } #define LIST_EMPTY_2(l) { { 0, &(l).tail, 0, &(l).tail }, \ { &(l).head, 0, &(l).head, 0 } } #define LIST_EMPTY_3(l) { { 0, &(l).tail, 0, &(l).tail, 0, &(l).tail }, \ { &(l).head, 0, &(l).head, 0, &(l).head, 0 } } #define LIST_EMPTY_4(l) { \ { 0, &(l).tail, 0, &(l).tail, 0, &(l).tail, 0, &(l).tail }, \ { &(l).head, 0, &(l).head, 0, &(l).head, 0, &(l).head, 0 } } #endif /* LIST_H */ /* One time in the same translation unit. */ #ifndef MIGRATE /* <-- migrate */ #define MIGRATE /** Contains information about a {realloc}. */ struct Migrate; struct Migrate { const void *begin, *end; /* Old pointers. */ ptrdiff_t delta; }; #endif /* migrate --> */ /* Private list position. */ struct PT_(X) { #ifdef LIST_UA_NAME struct PT_(X) *UA_(prev), *UA_(next); #endif #ifdef LIST_UB_NAME struct PT_(X) *UB_(prev), *UB_(next); #endif #ifdef LIST_UC_NAME struct PT_(X) *UC_(prev), *UC_(next); #endif #ifdef LIST_UD_NAME struct PT_(X) *UD_(prev), *UD_(next); #endif }; /** A single link in the linked-list derived from {<T>}. Storage of this structure is the responsibility of the caller. The {<T>} is stored in the element {data}. */ struct T_(Link); struct T_(Link) { T data; struct PT_(X) x; }; /** Serves as an a head for linked-list(s) of {<T>Link}. Use \see{<T>ListClear} to initialise. */ struct T_(List); struct T_(List) { /* These are sentinels such that {head.prev} and {tail.next} are always and the only ones to be null. This allows {List} and all {Links} to be closed, that is with a single pointer, we can infer every other. However, careful in changing this, \see{<PT>_list_<U>_self_correct}, {LIST_EMPTY[2-4]}. */ struct PT_(X) head, tail; }; /** Takes {<T>}; used in \see{<T>List<U>ForEach}. 
This definition is about the {LIST_NAME} type, that is, it is without the prefix {List}; to avoid namespace collisions, this is private, meaning the name is mangled. If one want this definition, re-declare it. */ typedef void (*PT_(Action))(T *const); /** Takes {<T>} and <void *>; used in \see{<T>List<U>BiForEach}. */ typedef void (*PT_(BiAction))(T *const, void *const); /** Takes {<T>}, returns (non-zero) true or (zero) false. */ typedef int (*PT_(Predicate))(const T *const); /** Takes {<T>} and {void *}, returns (non-zero) true or (zero) false. */ typedef int (*PT_(BiPredicate))(T *const, void *const); #ifdef LIST_SOME_COMPARATOR /* <-- comp */ /** Compares two {<T>} values and returns less then, equal to, or greater then zero. Should do so forming an equivalence relation with respect to {<T>}. */ typedef int (*PT_(Comparator))(const T *, const T *); #endif /* comp --> */ #ifdef LIST_TO_STRING /* <-- string */ /** Responsible for turning {<T>} (the first argument) into a 12 {char} null-terminated output string (the second.) */ typedef void (*PT_(ToString))(const T *const, char (*const)[12]); /* Check that {LIST_TO_STRING} is a function implementing {<T>ToString}. */ static const PT_(ToString) PT_(to_string) = (LIST_TO_STRING); #endif /* string --> */ /** Private: {container_of}. */ static struct T_(Link) *PT_(x_upcast)(struct PT_(X) *const x) { return (struct T_(Link) *)(void *) ((char *)x - offsetof(struct T_(Link), x)); } #ifdef LIST_TO_STRING /* <-- string */ /** Private: {container_of}. */ static const struct T_(Link) *PT_(const_x_upcast)(const struct PT_(X) *const x){ return (const struct T_(Link) *)(const void *) ((const char *)x - offsetof(struct T_(Link), x)); } #endif /* string --> */ /** Private: {container_of}. */ static struct T_(Link) *PT_(data_upcast)(T *const data) { return (struct T_(Link) *)(void *) ((char *)data - offsetof(struct T_(Link), data)); } /** Private: {container_of}; used for \see{<T>List<U>Next}, {etc}. 
*/ static const struct T_(Link) *PT_(const_data_upcast)(const T *const data) { return (const struct T_(Link) *)(const void *) ((const char *)data - offsetof(struct T_(Link), data)); } /** Private: used in \see{<PT>_order_<U>_migrate_each}; \${ptr \in [begin, end) -> ptr += delta}. */ static int PT_(migrate)(struct PT_(X) **const x_ptr, const struct Migrate *const migrate) { const void *const x = *x_ptr; assert(x_ptr); if(x < migrate->begin || x >= migrate->end) return 0; *(char **)x_ptr += migrate->delta; return 1; } /* Prototypes: needed for the next section, but undefined until later. */ static void PT_(add_before)(struct PT_(X) *const, struct PT_(X) *const); static void PT_(add_after)(struct PT_(X) *const, struct PT_(X) *const); static void PT_(remove)(struct PT_(X) *const); /* Note to future self: recursive includes. The {LIST_U_NAME} pre-processor flag controls this behaviour; we are currently in the {!LIST_U_NAME} section. These will get all the functions with {<U>} in them from below. */ #ifdef LIST_UA_NAME /* <-- a */ #define LIST_U_NAME LIST_UA_NAME #ifdef LIST_UA_COMPARATOR /* <-- comp */ #define LIST_U_COMPARATOR LIST_UA_COMPARATOR #endif /* comp --> */ #include "List.h" #endif /* a --> */ #ifdef LIST_UB_NAME /* <-- b */ #define LIST_U_NAME LIST_UB_NAME #ifdef LIST_UB_COMPARATOR /* <-- comp */ #define LIST_U_COMPARATOR LIST_UB_COMPARATOR #endif /* comp --> */ #include "List.h" #endif /* b --> */ #ifdef LIST_UC_NAME /* <-- c */ #define LIST_U_NAME LIST_UC_NAME #ifdef LIST_UC_COMPARATOR /* <-- comp */ #define LIST_U_COMPARATOR LIST_UC_COMPARATOR #endif /* comp --> */ #include "List.h" #endif /* c --> */ #ifdef LIST_UD_NAME /* <-- d */ #define LIST_U_NAME LIST_UD_NAME #ifdef LIST_UD_COMPARATOR /* <-- comp */ #define LIST_U_COMPARATOR LIST_UD_COMPARATOR #endif /* comp --> */ #include "List.h" #endif /* d --> */ /** Private: add before {x}. 
 Dispatches to every compiled-in order ({UA}..{UD}) so a node is always
 linked in all orders at once. */
static void PT_(add_before)(struct PT_(X) *const x, struct PT_(X) *const add) {
	assert(x && add && x != add);
#ifdef LIST_UA_NAME /* <-- a */
	PT_UA_(x, add_before)(x, add);
#endif /* a --> */
#ifdef LIST_UB_NAME /* <-- b */
	PT_UB_(x, add_before)(x, add);
#endif /* b --> */
#ifdef LIST_UC_NAME /* <-- c */
	PT_UC_(x, add_before)(x, add);
#endif /* c --> */
#ifdef LIST_UD_NAME /* <-- d */
	PT_UD_(x, add_before)(x, add);
#endif /* d --> */
}

/** Private: add after {x}; like \see{<PT>_add_before}, keeps every
 compiled-in order in sync. */
static void PT_(add_after)(struct PT_(X) *const x, struct PT_(X) *const add) {
	assert(x && add && x != add);
#ifdef LIST_UA_NAME /* <-- a */
	PT_UA_(x, add_after)(x, add);
#endif /* a --> */
#ifdef LIST_UB_NAME /* <-- b */
	PT_UB_(x, add_after)(x, add);
#endif /* b --> */
#ifdef LIST_UC_NAME /* <-- c */
	PT_UC_(x, add_after)(x, add);
#endif /* c --> */
#ifdef LIST_UD_NAME /* <-- d */
	PT_UD_(x, add_after)(x, add);
#endif /* d --> */
}

/** Private: remove from list; unlinks {x} from every compiled-in order.
 NOTE(review): unlike the adders, no {assert(x)} here; the per-order
 {<PT>_x_<U>_remove} asserts {x->prev && x->next} instead.
 @implements <T>LinkAction */
static void PT_(remove)(struct PT_(X) *const x) {
#ifdef LIST_UA_NAME /* <-- a */
	PT_UA_(x, remove)(x);
#endif /* a --> */
#ifdef LIST_UB_NAME /* <-- b */
	PT_UB_(x, remove)(x);
#endif /* b --> */
#ifdef LIST_UC_NAME /* <-- c */
	PT_UC_(x, remove)(x);
#endif /* c --> */
#ifdef LIST_UD_NAME /* <-- d */
	PT_UD_(x, remove)(x);
#endif /* d --> */
}

/** Private: clears and initialises.
 Resets both sentinels of every compiled-in order so the list is empty:
 {head.prev} and {tail.next} null, {head.next == &tail},
 {tail.prev == &head}. */
static void PT_(clear)(struct T_(List) *const list) {
	assert(list);
#ifdef LIST_UA_NAME
	list->head.UA_(prev) = list->tail.UA_(next) = 0;
	list->head.UA_(next) = &list->tail;
	list->tail.UA_(prev) = &list->head;
#endif
#ifdef LIST_UB_NAME
	list->head.UB_(prev) = list->tail.UB_(next) = 0;
	list->head.UB_(next) = &list->tail;
	list->tail.UB_(prev) = &list->head;
#endif
#ifdef LIST_UC_NAME
	list->head.UC_(prev) = list->tail.UC_(next) = 0;
	list->head.UC_(next) = &list->tail;
	list->tail.UC_(prev) = &list->head;
#endif
#ifdef LIST_UD_NAME
	list->head.UD_(prev) = list->tail.UD_(next) = 0;
	list->head.UD_(next) = &list->tail;
	list->tail.UD_(prev) = &list->head;
#endif
}

/** Private: add all {from} before {x}, in every compiled-in order; {from}
 is empty afterwards (each per-order {cat} resets its sentinels.) */
static void PT_(add_list_before)(struct PT_(X) *const x,
	struct T_(List) *const from) {
	assert(x && from);
#ifdef LIST_UA_NAME /* <-- a */
	PT_UA_(x, cat)(x, from);
#endif /* a --> */
#ifdef LIST_UB_NAME /* <-- b */
	PT_UB_(x, cat)(x, from);
#endif /* b --> */
#ifdef LIST_UC_NAME /* <-- c */
	PT_UC_(x, cat)(x, from);
#endif /* c --> */
#ifdef LIST_UD_NAME /* <-- d */
	PT_UD_(x, cat)(x, from);
#endif /* d --> */
}

/** Clears and removes all values from {list}, thereby initialising the
 {<T>List}. All previous values are un-associated. Do not use an
 un-initialised or default statically initialised list. One can initialise
 statically using the initialisation constant expression contained in the
 macro {struct <T>List list = LIST_EMPTY(list);}, or
 {LIST_EMPTY_[2-4](list);}, depending on how many orders that are in the
 list.
 @param list: if null, does nothing.
 @order \Theta(1)
 @allow */
static void T_(ListClear)(struct T_(List) *const list) {
	if(!list) return;
	PT_(clear)(list);
}

/** Initialises the contents of the node which contains {add} to add it to
 the beginning of {list}. If either {list} or {add} is null, it does nothing.
 @param add: Must be inside of a {<T>Link} and not associated to any list;
 this associates the {<T>Link} with the list.
 @order \Theta(1)
 @fixme Untested.
 @allow */
static void T_(ListUnshift)(struct T_(List) *const list, T *const add) {
	if(!list || !add) return;
	/* The head sentinel is fixed, so inserting after it prepends. */
	PT_(add_after)(&list->head, &PT_(data_upcast)(add)->x);
}

/** Initialises the contents of the node which contains {add} to add it to
 the end of {list}.
 @param list: If null, does nothing.
 @param add: If null, does nothing, otherwise must be inside of a {<T>Link}
 and not associated to any list; this associates the {<T>Link} that {add} is
 a part of with {list}, if it exists.
 @order \Theta(1)
 @allow */
static void T_(ListPush)(struct T_(List) *const list, T *const add) {
	if(!list || !add) return;
	/* The tail sentinel is fixed, so inserting before it appends. */
	PT_(add_before)(&list->tail, &PT_(data_upcast)(add)->x);
}

/** Initialises the contents of the node which contains {add} to add it
 immediately before {data}.
 @param data: If null, does nothing, otherwise must be part of a list.
 @param add: If null, does nothing, otherwise must be inside of a {<T>Link}
 and not associated to any list; this associates the {<T>Link} with the list
 of which {data} is a part, if it exists.
 @order \Theta(1)
 @fixme Untested.
 @allow */
static void T_(ListAddBefore)(T *const data, T *const add) {
	if(!data || !add) return;
	PT_(add_before)(&PT_(data_upcast)(data)->x, &PT_(data_upcast)(add)->x);
}

/** Initialises the contents of the node which contains {add} to add it
 immediately after {data}.
 @param data: If null, does nothing, otherwise must be part of a list.
 @param add: If null, does nothing, otherwise must be inside of a {<T>Link}
 and not associated to any list; this associates the {<T>Link} with the list
 of which {data} is a part, if it exists.
 @order \Theta(1)
 @fixme Untested.
 @allow */
static void T_(ListAddAfter)(T *const data, T *const add) {
	if(!data || !add) return;
	PT_(add_after)(&PT_(data_upcast)(data)->x, &PT_(data_upcast)(add)->x);
}

/** Un-associates {data} from the list; consequently, the {data} is free to
 add to another list or delete. Removing an element that was not added to a
 list results in undefined behaviour.
 @param data: If null, does nothing.
 @order \Theta(1)
 @allow */
static void T_(ListRemove)(T *const data) {
	if(!data) return;
	PT_(remove)(&PT_(data_upcast)(data)->x);
}

/** Appends the elements of {from} onto {list}. Unlike \see{<T>List<U>TakeIf}
 and all other selective choosing functions, that is, the ones with {<U>},
 this function preserves two or more orders.
 @param list: If null, then it removes elements.
 @param from: If null, it does nothing, otherwise this list will be empty on
 return.
 @order \Theta(1)
 @allow */
static void T_(ListTake)(struct T_(List) *const list,
	struct T_(List) *const from) {
	/* Self-take would erase the list; refuse it. */
	if(!from || from == list) return;
	/* Null destination: the documented contract is just to empty {from}. */
	if(!list) { PT_(clear)(from); return; }
	PT_(add_list_before)(&list->tail, from);
}

/** Appends the elements from {from} before {data}.
 @param data: If null, does nothing, otherwise if not part of a valid list,
 results are undefined.
 @param from: If null, does nothing, otherwise this list will be empty on
 return.
 @order \Theta(1)
 @fixme Untested.
 @allow */
static void T_(ListTakeBefore)(T *const data, struct T_(List) *const from) {
	if(!data || !from) return;
	PT_(add_list_before)(&PT_(data_upcast)(data)->x, from);
}

#ifdef LIST_SOME_COMPARATOR /* <-- comp */

/** Merges the elements from {from} into {list}. This uses local order and
 it doesn't sort them first; see \see{<T>ListSort}. Concatenates all lists
 that don't have a {LIST_COMPARATOR} or {LIST_U[A-D]_COMPARATOR}.
 @param list: if null, then it removes elements.
 @param from: if null, does nothing, otherwise this list will be empty on
 return.
@order O({list}.n + {from}.n) @allow */ static void T_(ListMerge)(struct T_(List) *const list, struct T_(List) *const from) { if(!from || from == list) return; if(!list) { PT_(clear)(from); return; } #ifdef LIST_OPENMP /* <-- omp */ #pragma omp parallel sections #endif /* omp --> */ { #ifdef LIST_UA_NAME /* <-- a */ #ifdef LIST_UA_COMPARATOR /* <-- comp */ #ifdef LIST_OPENMP /* <-- omp */ #pragma omp section #endif /* omp --> */ PT_UA_(list, merge)(list, from); #else /* comp --><-- !comp */ PT_UA_(list, cat)(list, from); #endif /* !comp --> */ #endif /* a --> */ #ifdef LIST_UB_NAME /* <-- b */ #ifdef LIST_UB_COMPARATOR /* <-- comp */ #ifdef LIST_OPENMP /* <-- omp */ #pragma omp section #endif /* omp --> */ PT_UB_(list, merge)(list, from); #else /* comp --><-- !comp */ PT_UB_(list, cat)(list, from); #endif /* !comp --> */ #endif /* b --> */ #ifdef LIST_UC_NAME /* <-- c */ #ifdef LIST_UC_COMPARATOR /* <-- comp */ #ifdef LIST_OPENMP /* <-- omp */ #pragma omp section #endif /* omp --> */ PT_UC_(list, merge)(list, from); #else /* comp --><-- !comp */ PT_UC_(list, cat)(list, from); #endif /* !comp --> */ #endif /* c --> */ #ifdef LIST_UD_NAME /* <-- d */ #ifdef LIST_UD_COMPARATOR /* <-- comp */ #ifdef LIST_OPENMP /* <-- omp */ #pragma omp section #endif /* omp --> */ PT_UD_(list, merge)(list, from); #else /* comp --><-- !comp */ PT_UD_(list, cat)(list, from); #endif /* !comp --> */ #endif /* d --> */ } } #ifndef LIST_U_ANONYMOUS /* <-- !anon; already has ListSort, T_U_(List, Sort) */ /** Performs a stable, adaptive sort on all orders which have comparators. @param list: If null, does nothing. 
@order \Omega({list}.n), O({list}.n log {list}.n) @allow */ static void T_(ListSort)(struct T_(List) *const list) { if(!list) return; #ifdef LIST_OPENMP /* <-- omp */ #pragma omp parallel sections #endif /* omp --> */ { #ifdef LIST_UA_COMPARATOR /* <-- a */ #ifdef LIST_OPENMP /* <-- omp */ #pragma omp section #endif /* omp --> */ PT_UA_(natural, sort)(list); #endif /* a --> */ #ifdef LIST_UB_COMPARATOR /* <-- b */ #ifdef LIST_OPENMP /* <-- omp */ #pragma omp section #endif /* omp --> */ PT_UB_(natural, sort)(list); #endif /* b --> */ #ifdef LIST_UC_COMPARATOR /* <-- c */ #ifdef LIST_OPENMP /* <-- omp */ #pragma omp section #endif /* omp --> */ PT_UC_(natural, sort)(list); #endif /* c --> */ #ifdef LIST_UD_COMPARATOR /* <-- d */ #ifdef LIST_OPENMP /* <-- omp */ #pragma omp section #endif /* omp --> */ PT_UD_(natural, sort)(list); #endif /* d --> */ } } #endif /* !anon --> */ #endif /* comp --> */ /** Adjusts one {<T>Link}'s internal pointers when supplied with a {Migrate} parameter. Specifically, if an agglomeration including the to the {<T>Link} pointers are changing with a new element from {Pool}, one must call this in the function you give to {POOL_MIGRATE_EACH}. @param data: If null, does nothing. @param migrate: If null, does nothing. Should only be called in a {Migrate} function; pass the {migrate} parameter. @implements <<T>Link>Migrate @order \Theta(n) @allow */ static void T_(LinkMigrate)(T *const data, const struct Migrate *const migrate){ struct PT_(X) *x; /* Relies on not-strictly-defined behaviour because pointers are not necessarily contiguous in memory; it should be fine in practice. 
*/ if(!data || !migrate || !migrate->delta) return; x = &PT_(data_upcast)(data)->x; #ifdef LIST_OPENMP /* <-- omp */ #pragma omp parallel sections #endif /* omp --> */ { #ifdef LIST_UA_NAME /* <-- a */ #ifdef LIST_OPENMP /* <-- omp */ #pragma omp section #endif /* omp --> */ PT_UA_(x, migrate)(x, migrate); #endif /* a --> */ #ifdef LIST_UB_NAME /* <-- b */ #ifdef LIST_OPENMP /* <-- omp */ #pragma omp section #endif /* omp --> */ PT_UB_(x, migrate)(x, migrate); #endif /* b --> */ #ifdef LIST_UC_NAME /* <-- c */ #ifdef LIST_OPENMP /* <-- omp */ #pragma omp section #endif /* omp --> */ PT_UC_(x, migrate)(x, migrate); #endif /* c --> */ #ifdef LIST_UD_NAME /* <-- d */ #ifdef LIST_OPENMP /* <-- omp */ #pragma omp section #endif /* omp --> */ PT_UD_(x, migrate)(x, migrate); #endif /* d --> */ } } /** Adjusts a pointer, {pdata}, to a {<T>Link}, given {migrate}. Use when some (external?) data has a pointer to the list. @param pdata, migrate: If null, does nothing. @fixme Untested. */ static void T_(LinkMigratePointer)(T **const pdata, const struct Migrate *const migrate) { const void *data; if(!pdata || !migrate) return; data = *pdata; if(data < migrate->begin || data >= migrate->end) return; *(char **)pdata += migrate->delta; } /** One must call this whenever the {<T>List} changes memory locations, (not the nodes.) This resets and corrects the two ends; the two ends become invalid even when it's empty. (For example, a {Pool} of {<T>List} would call this.) @param list: If null, does nothing. @order O(1) @fixme Untested. 
 @allow */
static void T_(ListSelfCorrect)(struct T_(List) *const list) {
	if(!list) return;
#ifdef LIST_OPENMP /* <-- omp */
#pragma omp parallel sections
#endif /* omp --> */
	{
#ifdef LIST_UA_NAME /* <-- a */
#ifdef LIST_OPENMP /* <-- omp */
#pragma omp section
#endif /* omp --> */
		PT_UA_(list, self_correct)(list);
#endif /* a --> */
#ifdef LIST_UB_NAME /* <-- b */
#ifdef LIST_OPENMP /* <-- omp */
#pragma omp section
#endif /* omp --> */
		PT_UB_(list, self_correct)(list);
#endif /* b --> */
#ifdef LIST_UC_NAME /* <-- c */
#ifdef LIST_OPENMP /* <-- omp */
#pragma omp section
#endif /* omp --> */
		PT_UC_(list, self_correct)(list);
#endif /* c --> */
#ifdef LIST_UD_NAME /* <-- d */
#ifdef LIST_OPENMP /* <-- omp */
#pragma omp section
#endif /* omp --> */
		PT_UD_(list, self_correct)(list);
#endif /* d --> */
	}
}

/** Debugging purposes. Audits every compiled-in order and asserts that all
 orders contain the same number of elements.
 NOTE(review): when {LIST_OPENMP} is defined, the sections share {i}, {j}
 and {is_j} with no synchronisation, and each section is more than one
 statement after the bare {#pragma omp section} -- this looks racy /
 non-conforming; confirm it is intended to be serial-only debug code. */
static void T_(ListAudit)(const struct T_(List) *const list) {
	size_t i, j = 0;
	int is_j = 0;
	if(!list) return;
#ifdef LIST_OPENMP /* <-- omp */
#pragma omp parallel sections
#endif /* omp --> */
	{
#ifdef LIST_UA_NAME /* <-- a */
#ifdef LIST_OPENMP /* <-- omp */
#pragma omp section
#endif /* omp --> */
		i = PT_UA_(x, audit)(list);
		if(is_j) assert(i == j); else (j = i, is_j = 1);
#endif /* a --> */
#ifdef LIST_UB_NAME /* <-- b */
#ifdef LIST_OPENMP /* <-- omp */
#pragma omp section
#endif /* omp --> */
		i = PT_UB_(x, audit)(list);
		if(is_j) assert(i == j); else (j = i, is_j = 1);
#endif /* b --> */
#ifdef LIST_UC_NAME /* <-- c */
#ifdef LIST_OPENMP /* <-- omp */
#pragma omp section
#endif /* omp --> */
		i = PT_UC_(x, audit)(list);
		if(is_j) assert(i == j); else (j = i, is_j = 1);
#endif /* c --> */
#ifdef LIST_UD_NAME /* <-- d */
#ifdef LIST_OPENMP /* <-- omp */
#pragma omp section
#endif /* omp --> */
		i = PT_UD_(x, audit)(list);
		if(is_j) assert(i == j); else (j = i, is_j = 1);
#endif /* d --> */
	}
}

#ifdef LIST_TEST /* <-- test */
#include "../test/TestList.h" /* Need this file if one is going to run tests.
*/ #endif /* test --> */ static void PT_(unused_coda)(void); /** This silences unused function warnings from the pre-processor, but allows optimisation, (hopefully.) \url{ http://stackoverflow.com/questions/43841780/silencing-unused-static-function-warnings-for-a-section-of-code } */ static void PT_(unused_list)(void) { T_(ListClear)(0); T_(ListUnshift)(0, 0); T_(ListPush)(0, 0); T_(ListAddBefore)(0, 0); T_(ListAddAfter)(0, 0); T_(ListRemove)(0); T_(ListTake)(0, 0); T_(ListTakeBefore)(0, 0); #ifdef LIST_SOME_COMPARATOR /* <-- comp */ T_(ListMerge)(0, 0); T_(ListSort)(0); #endif /* comp --> */ T_(LinkMigrate)(0, 0); T_(LinkMigratePointer)(0, 0); T_(ListSelfCorrect)(0); T_(ListAudit)(0); PT_(unused_coda)(); } /** {clang}'s pre-processor is not fooled if one has one function. */ static void PT_(unused_coda)(void) { PT_(unused_list)(); } /* Un-define all macros. */ #undef LIST_NAME #undef LIST_TYPE /* Undocumented; allows nestled inclusion so long as: {CAT_}, {CAT}, {PCAT}, {PCAT_} conform, and {T}, and {U}, are not used. 
*/ #ifdef LIST_SUBTYPE /* <-- sub */ #undef LIST_SUBTYPE #else /* sub --><-- !sub */ #undef CAT #undef CAT_ #undef PCAT #undef PCAT_ #endif /* !sub --> */ #undef T #undef T_ #undef PT_ #ifdef LIST_TO_STRING #undef LIST_TO_STRING #endif #ifdef LIST_FILLER #undef LIST_FILLER #endif #ifdef LIST_COMPARATOR #undef LIST_COMPARATOR #endif #ifdef LIST_U_ANONYMOUS #undef LIST_U_ANONYMOUS #endif #ifdef LIST_UA_NAME #undef LIST_UA_NAME #endif #ifdef LIST_UA_COMPARATOR #undef LIST_UA_COMPARATOR #endif #ifdef LIST_UB_NAME #undef LIST_UB_NAME #endif #ifdef LIST_UB_COMPARATOR #undef LIST_UB_COMPARATOR #endif #ifdef LIST_UC_NAME #undef LIST_UC_NAME #endif #ifdef LIST_UC_COMPARATOR #undef LIST_UC_COMPARATOR #endif #ifdef LIST_UD_NAME #undef LIST_UD_NAME #endif #ifdef LIST_UD_COMPARATOR #undef LIST_UD_COMPARATOR #endif #ifdef LIST_OPENMP #undef LIST_OPENMP #endif #ifdef LIST_TEST #undef LIST_TEST #endif #ifdef LIST_DEBUG #undef LIST_DEBUG #endif #ifdef LIST_NDEBUG #undef LIST_NDEBUG #undef NDEBUG #endif #ifdef LIST_SOME_COMPARATOR #undef LIST_SOME_COMPARATOR #endif #ifdef LIST_SORT_INTERNALS #undef LIST_SORT_INTERNALS /* Each List type has their own. */ #endif #else /* !LIST_U_NAME --><-- LIST_U_NAME Internally #included. @param LIST_U_NAME: A unique name of the linked list; required; @param LIST_U_COMPARATOR: an optional comparator. */ /* Generics using the preprocessor. 
 */

/* Per-order name manglers: {U_} is the order suffix, {T_U_} a public
 {<T>List<U>...} name, {PT_U_} a private {list_<T>_<U>_...} name. */
#ifdef T_U_
#undef T_U_
#endif
#ifdef PT_U_
#undef PT_U_
#endif
#ifdef U_
#undef U_
#endif
#ifdef LIST_U_ANONYMOUS /* <-- anon: "empty macro arguments standardized C99" */
#define U_(thing) PCAT(anonymous, thing)
#define T_U_(thing1, thing2) CAT(CAT(LIST_NAME, thing1), thing2)
#define PT_U_(thing1, thing2) PCAT(list, PCAT(PCAT(LIST_NAME, thing1), \
	CAT(_, thing2)))
#else /* anon --><-- !anon */
#define U_(thing) PCAT(LIST_U_NAME, thing)
#define T_U_(thing1, thing2) CAT(CAT(LIST_NAME, thing1), \
	CAT(LIST_U_NAME, thing2))
#define PT_U_(thing1, thing2) PCAT(list, PCAT(PCAT(LIST_NAME, thing1), \
	PCAT(LIST_U_NAME, thing2)))
#endif /* !anon --> */

/** "Floyd's" tortoise-hare algorithm for cycle detection when in debug mode.
 One does not want cycles! Only active when {LIST_DEBUG} is defined;
 otherwise compiles to nothing. */
static void PT_U_(cycle, crash)(struct PT_(X) *const x) {
#ifdef LIST_DEBUG
	struct PT_(X) *turtle, *hare;
	assert(x);
	/* Rewind to the first node (the one whose {prev} is null.) */
	for(turtle = x; turtle->U_(prev); turtle = turtle->U_(prev));
	/* Hare advances two for the turtle's one; they must never meet. */
	for(hare = turtle->U_(next); (turtle = turtle->U_(next),
		hare = hare->U_(next)) && (hare = hare->U_(next)); ) {
		assert(turtle != hare);
	}
#else
	(void)(x);
#endif
}

/** Private: add {add} before {x} in this order only; {x} must have a
 non-null {prev} (ie, {x} is not the head sentinel.) */
static void PT_U_(x, add_before)(struct PT_(X) *const x,
	struct PT_(X) *const add) {
	assert(x && add && x != add && x->U_(prev));
	add->U_(prev) = x->U_(prev);
	add->U_(next) = x;
	x->U_(prev)->U_(next) = add;
	x->U_(prev) = add;
	PT_U_(cycle, crash)(add);
}

/** Private: add {add} after {x} in this order only; {x} must have a
 non-null {next} (ie, {x} is not the tail sentinel.) */
static void PT_U_(x, add_after)(struct PT_(X) *const x,
	struct PT_(X) *const add) {
	assert(x && add && x != add && x->U_(next));
	add->U_(prev) = x;
	add->U_(next) = x->U_(next);
	x->U_(next)->U_(prev) = add;
	x->U_(next) = add;
	PT_U_(cycle, crash)(add);
}

/** Private: list remove in {<U>}; {x} must currently be linked (both
 neighbour pointers non-null.) */
static void PT_U_(x, remove)(struct PT_(X) *const x) {
	assert(x->U_(prev) && x->U_(next));
	x->U_(prev)->U_(next) = x->U_(next);
	x->U_(next)->U_(prev) = x->U_(prev);
	x->U_(prev) = x->U_(next) = 0; /* Just to be clean.
*/ } /** Private: cats all {from} in front of {x}, (don't cat {head}, instead {head->next}); {from} will be empty after. Careful that {x} is not in {from} because that will just erase the list. @order \Theta(1) */ static void PT_U_(x, cat)(struct PT_(X) *const x, struct T_(List) *const from) { assert(x && from && x->U_(prev) && !from->head.U_(prev) && from->head.U_(next) && from->tail.U_(prev) && !from->tail.U_(next)); from->head.U_(next)->U_(prev) = x->U_(prev); x->U_(prev)->U_(next) = from->head.U_(next); from->tail.U_(prev)->U_(next) = x; x->U_(prev) = from->tail.U_(prev); from->head.U_(next) = &from->tail; from->tail.U_(prev) = &from->head; PT_U_(cycle, crash)(x); PT_U_(cycle, crash)(&from->head); } /** Private: callback when {realloc} changes pointers. Called in \see{PT_(migrate_each)}. @order \Theta(1) */ static void PT_U_(x, migrate)(struct PT_(X) *const x, const struct Migrate *const migrate) { int is; assert(x && x->U_(prev) && x->U_(next) && migrate && migrate->begin && migrate->begin < migrate->end && migrate->delta); /* If node out of the migration region, it must have node into. Otherwise, assume the other node is on the list of migrates. */ if(!PT_(migrate)(&x->U_(prev), migrate)) is = PT_(migrate)(&x->U_(prev)->U_(next), migrate), assert(is); if(!PT_(migrate)(&x->U_(next), migrate)) is = PT_(migrate)(&x->U_(next)->U_(prev), migrate), assert(is); (void)is; } /** Private: when the actual list but not the data changes locations. */ static void PT_U_(list, self_correct)(struct T_(List) *const list) { assert(sizeof(T) > 0); /* This is a kind of hack relying on {tail, head} to be in packed order in {<T>List} but not in {<T>Link}. */ if(list->head.U_(next) == list->tail.U_(prev) + 1) { list->head.U_(next) = &list->tail; list->tail.U_(prev) = &list->head; } else { list->head.U_(next)->U_(prev) = &list->head; list->tail.U_(prev)->U_(next) = &list->tail; } } /** @param data: Must be part of a {List}. 
If {data} are not part of a valid list or has migrated locations due to a backing {realloc}, this function is undefined. If null, returns null. @return The next element in {<U>}. When {data} is the last element, returns null. @order \Theta(1) @allow */ static T *T_U_(List, Next)(const T *const data) { const struct PT_(X) *const x = &PT_(const_data_upcast)(data)->x; struct PT_(X) *next_x; if(!data) return 0; assert(x->U_(next)); if(!(next_x = x->U_(next))->U_(next)) return 0; return &PT_(x_upcast)(next_x)->data; } /** @param data: Must be part of a {List}. If {data} are not part of a valid list or has migrated locations due to a backing {realloc}, this function is undefined. If null, returns null. @return The previous element in {<U>}. When {data} is the first element, returns null. @order \Theta(1) @allow */ static T *T_U_(List, Previous)(const T *const data) { const struct PT_(X) *const x = &PT_(const_data_upcast)(data)->x; struct PT_(X) *prev_x; if(!data) return 0; assert(x->U_(prev)); if(!(prev_x = x->U_(prev))->U_(prev)) return 0; return &PT_(x_upcast)(prev_x)->data; } /** @param list: If null, returns null. @return A pointer to the first element of {list}. @order \Theta(1) @allow */ static T *T_U_(List, First)(const struct T_(List) *const list) { if(!list) return 0; assert(list->head.U_(next)); if(!list->head.U_(next)->U_(next)) return 0; /* Empty. */ return &PT_(x_upcast)(list->head.U_(next))->data; } /** @param list: If null, returns null. @return A pointer to the last element of {list}. @order \Theta(1) @allow */ static T *T_U_(List, Last)(const struct T_(List) *const list) { if(!list) return 0; assert(list->tail.U_(prev)); if(!list->tail.U_(prev)->U_(prev)) return 0; /* Empty. */ return &PT_(x_upcast)(list->tail.U_(prev))->data; } /** Un-associates the first element in the order {<U>} with the list, if the list is not empty. @param list: If null, returns null. @return The erstwhile first element or null if the list was empty. @fixme Untested. 
*/
static T *T_U_(List, Shift)(struct T_(List) *const list) {
	struct PT_(X) *x;
	if(!list) return 0;
	/* If the first link's own {U_(next)} is null it is the tail sentinel,
	 that is, the list is empty. */
	if(!(x = list->head.U_(next))->U_(next)) return 0;
	PT_(remove)(x);
	return &PT_(x_upcast)(x)->data;
}

/** Un-associates the last element in the order {<U>} with the list, if the
 list is not empty.
 @param list: If null, returns null.
 @return The erstwhile last element or null if the list was empty.
 @fixme Untested. */
static T *T_U_(List, Pop)(struct T_(List) *const list) {
	struct PT_(X) *x;
	if(!list) return 0;
	/* Mirror of {Shift}: last link being the head sentinel means empty. */
	if(!(x = list->tail.U_(prev))->U_(prev)) return 0;
	PT_(remove)(x);
	return &PT_(x_upcast)(x)->data;
}

#ifdef LIST_U_COMPARATOR /* <-- comp */

/* Check that each of {LIST_COMPARATOR} and {LIST_U[A-D]_COMPARATOR} are
 functions implementing {<PT>Comparator}. */
static const PT_(Comparator) PT_U_(data, cmp) = (LIST_U_COMPARATOR);

/** Private: merges {blist} into {alist} when we don't know anything about the
 data; on equal elements, places {alist} first (stable.)
 @order {O(n + m)}. */
static void PT_U_(list, merge)(struct T_(List) *const alist,
	struct T_(List) *const blist) {
	struct PT_(X) *hind, *a, *b;
	assert(alist && blist);
	/* {blist} empty -- that was easy. */
	if(!(b = blist->head.U_(next))->U_(next)) return;
	/* {alist} empty -- {O(1)} cat is more efficient. */
	if(!(a = alist->head.U_(next))->U_(next))
		{ PT_U_(x, cat)(&alist->tail, blist); return; }
	/* Merge: {hind} trails the last node already linked into the output. */
	for(hind = &alist->head; ; ) {
		if(PT_U_(data, cmp)(&PT_(x_upcast)(a)->data,
			&PT_(x_upcast)(b)->data) < 0) {
			/* Take from {a}; if {a} hits its tail sentinel, append the rest
			 of {b} wholesale and re-point {alist}'s tail. */
			a->U_(prev) = hind, hind = hind->U_(next) = a;
			if(!(a = a->U_(next))->U_(next)) {
				b->U_(prev) = hind, hind->U_(next) = b;
				blist->tail.U_(prev)->U_(next) = &alist->tail,
					alist->tail.U_(prev) = blist->tail.U_(prev);
				break;
			}
		} else {
			/* Take from {b}; ties prefer {a}, preserving stability. */
			b->U_(prev) = hind, hind = hind->U_(next) = b;
			if(!(b = b->U_(next))->U_(next))
				{ a->U_(prev) = hind, hind->U_(next) = a; break; }
		}
	}
	/* Leave {blist} in a valid, empty state. */
	blist->head.U_(next) = &blist->tail, blist->tail.U_(prev) = &blist->head;
}

#ifndef LIST_SORT_INTERNALS /* <!-- sort internals only once per translation unit */
#define LIST_SORT_INTERNALS
/* A run is a temporary sequence of values in the array that is weakly
 increasing; we store its size temporarily. */
struct PT_(Run) {
	struct PT_(X) *head, *tail;
	size_t size;
};
/* Store the maximum capacity for the indexing with {size_t}. (Much more than
 we need, in most cases.) \${
 range(runs) = Sum_{k=0}^runs 2^{runs-k} - 1
 = 2^{runs+1} - 2
 2^bits = 2 (r^runs - 1)
 runs = log(2^{bits-1} + 1) / log 2
 runs <= 2^{bits - 1}, 2^{bits + 1} > 0} */
struct PT_(Runs) {
	struct PT_(Run) run[(sizeof(size_t) << 3) - 1];
	size_t run_no;
};
#endif /* sort internals --> */

/** Inserts the first element from the larger of two sorted runs, then merges
 the rest. \cite{Peters2002Timsort}, via \cite{McIlroy1993Optimistic}, does
 long merges by galloping, but we don't have random access to the data. In
 practice, this is {2%} slower on randomly distributed keys when the
 linked-list size is over {500 000}; randomly distributed keys have high
 insertion times that do well in standard merging. However, it's (potentially
 much) faster when the keys have structure: observed, {[-2%, 500%]}.
*/
static void PT_U_(runs, merge)(struct PT_(Runs) *const r) {
	/* Merge the top two runs on the stack, {run_a} and {run_b}. */
	struct PT_(Run) *const run_a = r->run + r->run_no - 2;
	struct PT_(Run) *const run_b = run_a + 1;
	struct PT_(X) *a = run_a->tail, *b = run_b->head, *chosen;
	assert(r->run_no >= 2);
	/* @fixme We are doing one-to-many compares in some cases? */
	if(run_a->size <= run_b->size) {
		struct PT_(X) *prev_chosen;
		/* Run {a} is smaller: downwards insert {b.head} followed by upwards
		 merge. Insert the first element of {b} downwards into {a}. */
		for( ; ; ) {
			if(PT_U_(data, cmp)(&PT_(x_upcast)(a)->data,
				&PT_(x_upcast)(b)->data) <= 0) {
				chosen = a;
				a = a->U_(next);
				break;
			}
			/* Ran off the start of run {a}: {b}'s head becomes the new head. */
			if(!a->U_(prev)) {
				run_a->head = run_b->head;
				chosen = b;
				b = b->U_(next);
				break;
			}
			a = a->U_(prev);
		}
		/* Merge upwards; while the lists are interleaved. */
		while(chosen->U_(next)) {
			prev_chosen = chosen;
			if(PT_U_(data, cmp)(&PT_(x_upcast)(a)->data,
				&PT_(x_upcast)(b)->data) > 0) {
				chosen = b;
				b = b->U_(next);
			} else {
				chosen = a;
				a = a->U_(next);
			}
			prev_chosen->U_(next) = chosen;
			chosen->U_(prev) = prev_chosen;
		}
		/* Splice the one list left. */
		if(!a) {
			b->U_(prev) = chosen;
			chosen->U_(next) = b;
			run_a->tail = run_b->tail;
		} else {
			a->U_(prev) = chosen;
			chosen->U_(next) = a;
		}
	} else {
		struct PT_(X) *next_chosen;
		int is_a_tail = 0;
		/* Run {b} is smaller; upwards insert followed by downwards merge.
		 Insert the last element of {a} upwards into {b}. */
		for( ; ; ) {
			if(PT_U_(data, cmp)(&PT_(x_upcast)(a)->data,
				&PT_(x_upcast)(b)->data) <= 0) {
				chosen = b;
				b = b->U_(prev);
				break;
			}
			/* Here, {a > b}. */
			if(!b->U_(next)) {
				is_a_tail = -1;
				chosen = a;
				a = a->U_(prev);
				break;
			}
			b = b->U_(next);
		}
		if(!is_a_tail) run_a->tail = run_b->tail;
		/* Merge downwards, while the lists are interleaved. */
		while(chosen->U_(prev)) {
			next_chosen = chosen;
			if(PT_U_(data, cmp)(&PT_(x_upcast)(a)->data,
				&PT_(x_upcast)(b)->data) > 0) {
				chosen = a;
				a = a->U_(prev);
			} else {
				chosen = b;
				b = b->U_(prev);
			}
			next_chosen->U_(prev) = chosen;
			chosen->U_(next) = next_chosen;
		}
		/* Splice the one list left. */
		if(!a) {
			b->U_(next) = chosen;
			chosen->U_(prev) = b;
			run_a->head = run_b->head;
		} else {
			a->U_(next) = chosen;
			chosen->U_(prev) = a;
		}
	}
	/* The two runs collapse into one. */
	run_a->size += run_b->size;
	r->run_no--;
}

/** Natural merge sort. It's kind of experimental. It hasn't been optimised; I
 think it does useless compares. It's so beautiful. */
static void PT_U_(natural, sort)(struct T_(List) *const list) {
	/* This is potentially half-a-KB; we had an option to store as a global,
	 but that was probably overkill. */
	struct PT_(Runs) runs;
	struct PT_(Run) *new_run;
	/* Part of the state machine for classifying points wrt their
	 neighbours. */
	enum { UNSURE, INCREASING, DECREASING } mono;
	/* The data that we are sorting. */
	struct PT_(X) *a, *b, *c, *first_iso_a;
	/* {run_count} is different from {runs.run_no} in that it only increases;
	 only used for calculating the path up the tree. */
	size_t run_count, rc;
	/* The value of the comparison. */
	int comp;
	/* Needs an element. */
	a = list->head.U_(next), assert(a);
	if(!(b = a->U_(next))) return;
	/* Reset the state machine and output to just {a} in the first run. */
	mono = UNSURE;
	runs.run_no = 1;
	new_run = runs.run + 0, run_count = (size_t)1;
	new_run->size = 1;
	first_iso_a = new_run->head = new_run->tail = a;
	/* While {a} and {b} are elements (that are consecutive.) {c} may not
	 be. */
	for(c = b->U_(next); c; a = b, b = c, c = c->U_(next)) {
		comp = PT_U_(data, cmp)(&PT_(x_upcast)(a)->data,
			&PT_(x_upcast)(b)->data);
		/* State machine that considers runs in both directions -- in
		 practice, slightly slower than only considering increasing runs on
		 most cases; however, I would hate to see my code replaced with one
		 line; reverse order is 15 times faster, but it's not likely. */
		if(comp < 0) {
			/* {a < b}, increasing -- good. */
			if(mono != DECREASING) {
				/* If decreasing, inflection. */
				mono = INCREASING;
				new_run->size++;
				continue;
			}
		} else if(comp > 0) {
			/* Decreasing; reverse preserving stability. */
			if(mono != INCREASING) {
				/* If increasing, inflection. */
				mono = DECREASING;
				b->U_(next) = first_iso_a;
				first_iso_a->U_(prev) = b;
				new_run->head = first_iso_a = b;
				new_run->size++;
				continue;
			}
			new_run->tail = a; /* Terminating an increasing sequence. */
		} else {
			/* {a} == {b} */
			if(mono == DECREASING) {
				/* Extend: keep equal elements in original order. */
				struct PT_(X) *const a_next = a->U_(next);
				b->U_(next) = a_next;
				a_next->U_(prev) = b;
				a->U_(next) = b;
				b->U_(prev) = a;
			} else {
				/* Monotone or weakly increasing. */
				new_run->tail = b;
			}
			new_run->size++;
			continue;
		}
		/* Head and tail don't necessarily correspond to the first and
		 last. */
		new_run->head->U_(prev) = new_run->tail->U_(next) = 0;
		/* Greedy merge: keeps space to {O(log n)} instead of {O(n)}. */
		for(rc = run_count; !(rc & 1) && runs.run_no >= 2; rc >>= 1)
			PT_U_(runs, merge)(&runs);
		/* Reset the state machine and output to just {b} at the next run. */
		mono = UNSURE;
		assert(runs.run_no < sizeof(runs.run) / sizeof(*runs.run));
		new_run = runs.run + runs.run_no++, run_count++;
		new_run->size = 1;
		new_run->head = new_run->tail = first_iso_a = b;
	}
	/* Terminating the last increasing sequence. */
	if(mono == INCREASING) new_run->tail = a;
	new_run->tail->U_(next) = new_run->head->U_(prev) = 0;
	/* Clean up the rest; when only one run, propagate list_runs[0] to
	 head. */
	while(runs.run_no > 1) PT_U_(runs, merge)(&runs);
	runs.run[0].head->U_(prev) = &list->head;
	runs.run[0].tail->U_(next) = &list->tail;
	list->head.U_(next) = runs.run[0].head;
	list->tail.U_(prev) = runs.run[0].tail;
}

/** Sorts {<U>}, but leaves the other lists in {<T>} alone. Must have a
 comparator defined for the index.
 @param list: if null, does nothing.
@order \Omega({list}.n), O({list}.n log {list}.n) @allow */ static void T_U_(List, Sort)(struct T_(List) *const list) { if(!list) return; PT_U_(natural, sort)(list); } /** Compares two linked-lists as sequences in the order specified by {<U>}. @return The first comparator that is not equal to zero, or 0 if they are equal. Null is considered as before everything else; two null pointers are considered equal. Must have a comparator defined for this index. @implements <<T>List>Comparator @order \Theta(min({alist}.n, {blist}.n)) @allow */ static int T_U_(List, Compare)(const struct T_(List) *const alist, const struct T_(List) *const blist) { struct PT_(X) *a, *b; int diff; /* Null counts as {-\infty}. */ if(!alist) { return blist ? -1 : 0; } else if(!blist) { return 1; } /* Compare element by element. */ for(a = alist->head.U_(next), b = blist->head.U_(next); ; a = a->U_(next), b = b->U_(next)) { if(!a->U_(next)) { return b->U_(next) ? -1 : 0; } else if(!b->U_(next)) { return 1; } else if((diff = PT_U_(data, cmp) (&PT_(x_upcast)(a)->data, &PT_(x_upcast)(b)->data))) { return diff; } } } /* @fixme {P_T_(List, Unique)} remove duplicate values. */ /** Private: {list <- a \mask b}. Prefers {a} to {b} when equal. @order O({a}.n + {b}.n) */ static void PT_U_(boolean, seq)(struct T_(List) *const list, struct T_(List) *const alist, struct T_(List) *const blist, const enum ListOperation mask) { struct PT_(X) *a = alist ? alist->head.U_(next) : 0, *b = blist ? 
blist->head.U_(next) : 0, *temp; int comp; while(a->U_(next) && b->U_(next)) { comp = PT_U_(data, cmp)(&PT_(x_upcast)(a)->data, &PT_(x_upcast)(b)->data); if(comp < 0) { temp = a, a = a->U_(next); if(mask & LO_SUBTRACTION_AB) { PT_(remove)(temp); if(list) PT_(add_before)(&list->tail, temp); } } else if(comp > 0) { temp = b, b = b->U_(next); if(mask & LO_SUBTRACTION_BA) { PT_(remove)(temp); if(list) PT_(add_before)(&list->tail, temp); } } else { temp = a, a = a->U_(next), b = b->U_(next); if(mask & LO_INTERSECTION) { PT_(remove)(temp); if(list) PT_(add_before)(&list->tail, temp); } } } if(mask & LO_DEFAULT_A) { while((temp = a, a = a->U_(next))) { PT_(remove)(temp); if(list) PT_(add_before)(&list->tail, temp); } } if(mask & LO_DEFAULT_B) { while((temp = b, b = b->U_(next))) { PT_(remove)(temp); if(list) PT_(add_before)(&list->tail, temp); } } } /** Appends {list} with {b} subtracted from {a} as a sequence in {<U>}. Must have a comparator defined. @param list: If null, then it removes elements. @order O({a}.n + {b}.n) @allow */ static void T_U_(List, TakeSubtraction)(struct T_(List) *const list, struct T_(List) *const a, struct T_(List) *const b) { PT_U_(boolean, seq)(list, a, b, LO_SUBTRACTION_AB | LO_DEFAULT_A); } /** Appends {list} with the union of {a} and {b} as a sequence in {<U>}. Equal elements are moved from {a}. @param list: If null, then it removes elements. @order O({a}.n + {b}.n) @allow */ static void T_U_(List, TakeUnion)(struct T_(List) *const list, struct T_(List) *const a, struct T_(List) *const b) { PT_U_(boolean, seq)(list, a, b, LO_SUBTRACTION_AB | LO_SUBTRACTION_BA | LO_INTERSECTION | LO_DEFAULT_A | LO_DEFAULT_B); } /** Appends {list} with the intersection of {a} and {b} as a sequence in {<U>}. Equal elements are moved from {a}. @param list: If null, then it removes elements. 
@order O({a}.n + {b}.n) @allow */ static void T_U_(List, TakeIntersection)(struct T_(List) *const list, struct T_(List) *const a, struct T_(List) *const b) { PT_U_(boolean, seq)(list, a, b, LO_INTERSECTION); } /** Appends {list} with {a} exclusive-or {b} as a sequence in {<U>}. Equal elements are moved from {a}. @param list: If null, then it removes elements. @order O({a}.n + {b}.n) @allow */ static void T_U_(List, TakeXor)(struct T_(List) *const list, struct T_(List) *const a, struct T_(List) *const b) { PT_U_(boolean, seq)(list, a, b, LO_SUBTRACTION_AB | LO_SUBTRACTION_BA | LO_DEFAULT_A | LO_DEFAULT_B); } #endif /* comp --> */ /** Appends {list} with {from} if {predicate} is null or true in the order specified by {<U>}. @param list: If null, then it removes elements. @param from: If null, does nothing. @order ~ \Theta({list}.n) \times O({predicate}) @allow */ static void T_U_(List, TakeIf)(struct T_(List) *const list, struct T_(List) *const from, const PT_(Predicate) predicate) { struct PT_(X) *x, *next_x; if(!from || from == list) return; for(x = from->head.U_(next); (next_x = x->U_(next)); x = next_x) { if(predicate && !predicate(&PT_(x_upcast)(x)->data)) continue; PT_(remove)(x); if(list) PT_(add_before)(&list->tail, x); } } /** Appends {list} with {from} if {bipredicate} is null or true in the order specified by {<U>}. @param list: If null, then it removes elements. @param from: If null, does nothing. @order ~ \Theta({list}.n) \times O({predicate}) @fixme Void. No. @allow */ static void T_U_(List, BiTakeIf)(struct T_(List) *const list, struct T_(List) *const from, const PT_(BiPredicate) bipredicate, void *const param) { struct PT_(X) *x, *next_x; if(!from || from == list) return; for(x = from->head.U_(next); (next_x = x->U_(next)); x = next_x) { if(bipredicate && !bipredicate(&PT_(x_upcast)(x)->data, param)) continue; PT_(remove)(x); if(list) PT_(add_before)(&list->tail, x); } } /** Performs {action} for each element in {list} in the order specified by {<U>}. 
@param list, action: If null, does nothing.
 @order ~ \Theta({list}.n) \times O({action})
 @allow */
static void T_U_(List, ForEach)(struct T_(List) *const list,
	const PT_(Action) action) {
	struct PT_(X) *x, *next_x;
	if(!list || !action) return;
	/* {next_x} is read before calling {action} in case the action unlinks
	 the current element. */
	for(x = list->head.U_(next); (next_x = x->U_(next)); x = next_x)
		action(&PT_(x_upcast)(x)->data);
}

/** Performs {biaction} for each element in the list in the order specified by
 {<U>}.
 @param list, action: If null, does nothing.
 @param param: Used as the second parameter of {biaction}.
 @order ~ \Theta({list}.n) \times O({biaction})
 @fixme Untested.
 @fixme Void. No.
 @allow */
static void T_U_(List, BiForEach)(struct T_(List) *const list,
	const PT_(BiAction) biaction, void *const param) {
	struct PT_(X) *x, *next_x;
	if(!list || !biaction) return;
	for(x = list->head.U_(next); (next_x = x->U_(next)); x = next_x)
		biaction(&PT_(x_upcast)(x)->data, param);
}

/** Short-circuit evaluates {list} with each item's {predicate}.
 @param list, predicate: If null, returns null.
 @return The first {<T>} in the linked-list, ordered by {<U>}, that causes the
 {predicate} with {<T>} as argument to return false, or null if the
 {predicate} is true for every case.
 @order ~ O({list}.n) \times O({predicate})
 @allow */
static T *T_U_(List, All)(struct T_(List) *const list,
	const PT_(Predicate) predicate) {
	struct PT_(X) *x, *next_x;
	T *data;
	if(!list || !predicate) return 0;
	for(x = list->head.U_(next); (next_x = x->U_(next)); x = next_x)
		if(data = &PT_(x_upcast)(x)->data, !predicate(data)) return data;
	return 0;
}

/** Short-circuit evaluates {list} with each item's {predicate}.
 @param list, bipredicate: If null, returns null.
 @param param: Used as the second parameter of {bipredicate}.
 @return The first {<T>} in the linked-list, ordered by {<U>}, that causes the
 {bipredicate} with {<T>} and {param} as arguments to return false, or null if
 the {bipredicate} is true for every case.
 @order ~ O({list}.n) \times O({predicate})
 @fixme Void. No.
 Have interfaces.
 @allow */
static T *T_U_(List, BiAll)(struct T_(List) *const list,
	const PT_(BiPredicate) bipredicate, void *const param) {
	struct PT_(X) *x, *next_x;
	T *data;
	if(!list || !bipredicate) return 0;
	for(x = list->head.U_(next); (next_x = x->U_(next)); x = next_x)
		if(data = &PT_(x_upcast)(x)->data, !bipredicate(data, param))
			return data;
	return 0;
}

#ifdef LIST_TO_STRING /* <-- print */

#ifndef LIST_PRINT_THINGS /* <-- once inside translation unit */
#define LIST_PRINT_THINGS

/* Fixed decorations used by {<T>List<U>ToString}. */
static const char *const list_cat_start     = "{";
static const char *const list_cat_end       = "}";
static const char *const list_cat_alter_end = "...}";
static const char *const list_cat_sep       = ", ";
static const char *const list_cat_star      = "*";
static const char *const list_cat_null      = "null";

/* Bounded string concatenator: writes into a caller-supplied buffer and
 latches {is_truncated} once it runs out of room. */
struct List_SuperCat {
	char *print, *cursor;
	size_t left;
	int is_truncated;
};
static void list_super_cat_init(struct List_SuperCat *const cat,
	char *const print, const size_t print_size) {
	cat->print = cat->cursor = print;
	cat->left = print_size;
	cat->is_truncated = 0;
	print[0] = '\0';
}
static void list_super_cat(struct List_SuperCat *const cat,
	const char *const append) {
	size_t lu_took;
	int took;
	if(cat->is_truncated) return;
	/* The {%.*s} precision caps the write at {cat->left} characters. */
	took = sprintf(cat->cursor, "%.*s", (int)cat->left, append);
	if(took < 0) { cat->is_truncated = -1; return; } /*implementation defined*/
	if(took == 0) { return; }
	if((lu_took = (size_t)took) >= cat->left)
		cat->is_truncated = -1, lu_took = cat->left - 1;
	cat->cursor += lu_took, cat->left -= lu_took;
}
#endif /* once --> */

/** Can print 4 things at once before it overwrites. One must set
 {LIST_TO_STRING} to a function implementing {<T>ToString} to get this
 functionality.
 @return Prints the {list} in a static buffer.
 @order \Theta(1); it has a 255 character limit; every element takes some of it.
@allow */
static char *T_U_(List, ToString)(const struct T_(List) *const list) {
	/* Four rotating static buffers, so up to four results stay valid. */
	static char buffer[4][256];
	static int buffer_i;
	struct List_SuperCat cat;
	char scratch[12];
	const struct PT_(X) *x;
	assert(strlen(list_cat_alter_end) >= strlen(list_cat_end));
	assert(sizeof buffer > strlen(list_cat_alter_end));
	/* Reserve room for the "...}" alternate terminator up-front. */
	list_super_cat_init(&cat, buffer[buffer_i],
		sizeof *buffer / sizeof **buffer - strlen(list_cat_alter_end));
	buffer_i++, buffer_i &= 3;
	if(!list) {
		list_super_cat(&cat, list_cat_null);
		return cat.print;
	}
	list_super_cat(&cat, list_cat_start);
	for(x = list->head.U_(next); x->U_(next); x = x->U_(next)) {
		if(x != list->head.U_(next)) list_super_cat(&cat, list_cat_sep);
		PT_(to_string)(&PT_(const_x_upcast)(x)->data, &scratch),
			scratch[sizeof scratch - 1] = '\0';
		list_super_cat(&cat, scratch);
		if(cat.is_truncated) break;
	}
	/* Space for this was reserved above, so a plain {sprintf} is safe. */
	sprintf(cat.cursor, "%s",
		cat.is_truncated ? list_cat_alter_end : list_cat_end);
	return cat.print;
}

#endif /* print --> */

/** Private: audit index by going through it forwards then back.
 @return Number of elements. */
static size_t PT_U_(x, audit)(const struct T_(List) *const list) {
	struct PT_(X) *emu;
	size_t f = 0, b = 0;
	assert(list);
	for(emu = list->head.U_(next); emu->U_(next); emu = emu->U_(next)) f++;
	for(emu = list->tail.U_(prev); emu->U_(prev); emu = emu->U_(prev)) b++;
	/* Forward and backward counts must agree or the links are corrupt. */
	assert(f == b);
	return f;
}

static void PT_U_(sub_unused, coda)(void);
/** This silences unused function warnings from the pre-processor, but allows
 optimisation, (hopefully.)
 \url{ http://stackoverflow.com/questions/43841780/silencing-unused-static-function-warnings-for-a-section-of-code } */
static void PT_U_(sub_unused, list)(void) {
	/* These calls are never executed; they only reference the functions. */
	T_U_(List, Next)(0);
	T_U_(List, Previous)(0);
	T_U_(List, First)(0);
	T_U_(List, Last)(0);
	T_U_(List, Shift)(0);
	T_U_(List, Pop)(0);
#ifdef LIST_U_COMPARATOR /* <-- comp */
	T_U_(List, Sort)(0);
	T_U_(List, Compare)(0, 0);
	T_U_(List, TakeSubtraction)(0, 0, 0);
	T_U_(List, TakeUnion)(0, 0, 0);
	T_U_(List, TakeIntersection)(0, 0, 0);
	T_U_(List, TakeXor)(0, 0, 0);
#endif /* comp --> */
	T_U_(List, TakeIf)(0, 0, 0);
	T_U_(List, BiTakeIf)(0, 0, 0, 0);
	T_U_(List, ForEach)(0, 0);
	T_U_(List, BiForEach)(0, 0, 0);
	T_U_(List, All)(0, 0);
	T_U_(List, BiAll)(0, 0, 0);
#ifdef LIST_TO_STRING /* <-- string */
	T_U_(List, ToString)(0);
#endif /* string --> */
	PT_U_(cycle, crash)(0);
	PT_U_(sub_unused, coda)();
}
/** {clang}'s pre-processor is not fooled. */
static void PT_U_(sub_unused, coda)(void) {
	PT_U_(sub_unused, list)();
}

/* Un-define stuff for the next. */
#undef LIST_U_NAME
#ifdef LIST_U_COMPARATOR /* <-- comp */
#undef LIST_U_COMPARATOR
#endif /* comp --> */

#endif /* LIST_U_NAME --> */
0ee8f4736d289edd2859eaf7303628_p10082.c
#include <complex.h> #include <math.h> #include <petsc.h> #include <stdint.h> #define LOOPY_CALL_WITH_INTEGER_TYPES(MACRO_NAME) \ MACRO_NAME(int8, char) \ MACRO_NAME(int16, short) \ MACRO_NAME(int32, int) \ MACRO_NAME(int64, long) #define LOOPY_DEFINE_FLOOR_DIV_POS_B(SUFFIX, TYPE) \ static inline TYPE loopy_floor_div_pos_b_##SUFFIX(TYPE a, TYPE b) \ { \ if (a<0) \ a = a - (b-1); \ return a/b; \ } LOOPY_CALL_WITH_INTEGER_TYPES(LOOPY_DEFINE_FLOOR_DIV_POS_B) #undef LOOPY_DEFINE_FLOOR_DIV_POS_B #undef LOOPY_CALL_WITH_INTEGER_TYPES #define BYTES8 (8*8) typedef double double8 __attribute__ ((vector_size (BYTES8))); #define BYTES4 (8*4) typedef int int8 __attribute__ ((vector_size (BYTES4))); void wrap_form0_cell_integral_otherwise(int const start, int const end, double *__restrict__ dat2, double const *__restrict__ dat1, double const *__restrict__ glob0, double const *__restrict__ dat0, int const *__restrict__ map0, int const *__restrict__ map1) { double8 form_form0_cell_integral_otherwise __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_0 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_1 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_10 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_11 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_12 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_13 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_14 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_15 __attribute__ ((aligned (64))); double const form_form0_cell_integral_otherwise_16[4] __attribute__ ((aligned (64))) = { 0.041666666666666664, 0.041666666666666664, 0.041666666666666664, 0.041666666666666664 }; double const form_form0_cell_integral_otherwise_17[4 * 4] __attribute__ ((aligned (64))) = { 0.13819660112500914, 0.585410196624969, 0.138196601125011, 
0.13819660112501098, 0.13819660112500912, 0.13819660112501092, 0.585410196624969, 0.13819660112501098, 0.13819660112500914, 0.13819660112501095, 0.138196601125011, 0.585410196624969, 0.5854101966249672, 0.13819660112501092, 0.138196601125011, 0.13819660112501098 }; double8 form_form0_cell_integral_otherwise_18 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_19 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_2 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_20 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_21 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_22[4] __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_23 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_24 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_25 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_26 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_27 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_28 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_29 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_3 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_30 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_31 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_32 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_33 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_34 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_35 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_36 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_37 __attribute__ ((aligned (64))); double8 
form_form0_cell_integral_otherwise_38 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_39 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_4 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_40 __attribute__ ((aligned (64))); double const form_form0_cell_integral_otherwise_41[4] __attribute__ ((aligned (64))) = { -1.0, 0.0, 0.0, 1.0 }; double8 form_form0_cell_integral_otherwise_42 __attribute__ ((aligned (64))); double const form_form0_cell_integral_otherwise_43[4] __attribute__ ((aligned (64))) = { -1.0, 0.0, 1.0, 0.0 }; double8 form_form0_cell_integral_otherwise_44 __attribute__ ((aligned (64))); double const form_form0_cell_integral_otherwise_45[4] __attribute__ ((aligned (64))) = { -1.0, 1.0, 0.0, 0.0 }; double8 form_form0_cell_integral_otherwise_5 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_6 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_7 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_8 __attribute__ ((aligned (64))); double8 form_form0_cell_integral_otherwise_9 __attribute__ ((aligned (64))); double8 t0[4] __attribute__ ((aligned (64))); double8 t1[4 * 3] __attribute__ ((aligned (64))); double8 t2[4] __attribute__ ((aligned (64))); double8 const zero_vec_float64 = { 0.0 }; { /* initial slab for 'n_outer' */ /* */ } { int const n_outer = start + -1 * ((7 + 7 * start) / 8); if (-1 + end + -1 * start >= 0) { for (int i2 = 0; i2 <= 3; ++i2) t2[i2] = zero_vec_float64; for (int i0 = 0; i0 <= 3; ++i0) { for (int i1 = 0; i1 <= 2; ++i1) for (int n_batch = start + -8 * n_outer; n_batch <= (-8 + end + -8 * n_outer >= 0 ? 7 : -1 + end + -8 * n_outer); ++n_batch) t1[(3 * i0 + i1)][n_batch] = dat1[3 * map1[32 * n_outer + 4 * n_batch + i0] + i1]; for (int n_batch = start + -8 * n_outer; n_batch <= (-8 + end + -8 * n_outer >= 0 ? 
7 : -1 + end + -8 * n_outer); ++n_batch) t0[i0][n_batch] = dat0[map0[32 * n_outer + 4 * n_batch + i0]]; } for (int n_batch = start + -8 * n_outer; n_batch <= (-8 + end + -8 * n_outer >= 0 ? 7 : -1 + end + -8 * n_outer); ++n_batch) { /* no-op (insn=form__start) */ /* */ } form_form0_cell_integral_otherwise_20 = zero_vec_float64; form_form0_cell_integral_otherwise = -1.0 * t1[0]; form_form0_cell_integral_otherwise_0 = form_form0_cell_integral_otherwise + t1[3]; form_form0_cell_integral_otherwise_1 = -1.0 * t1[1]; form_form0_cell_integral_otherwise_2 = form_form0_cell_integral_otherwise_1 + t1[7]; form_form0_cell_integral_otherwise_3 = -1.0 * t1[2]; form_form0_cell_integral_otherwise_4 = form_form0_cell_integral_otherwise_3 + t1[11]; form_form0_cell_integral_otherwise_5 = form_form0_cell_integral_otherwise_1 + t1[10]; form_form0_cell_integral_otherwise_6 = form_form0_cell_integral_otherwise_3 + t1[8]; form_form0_cell_integral_otherwise_7 = form_form0_cell_integral_otherwise_2 * form_form0_cell_integral_otherwise_4 + -1.0 * form_form0_cell_integral_otherwise_5 * form_form0_cell_integral_otherwise_6; form_form0_cell_integral_otherwise_8 = form_form0_cell_integral_otherwise + t1[6]; form_form0_cell_integral_otherwise_9 = form_form0_cell_integral_otherwise_3 + t1[5]; form_form0_cell_integral_otherwise_10 = form_form0_cell_integral_otherwise_5 * form_form0_cell_integral_otherwise_9; form_form0_cell_integral_otherwise_11 = form_form0_cell_integral_otherwise_1 + t1[4]; form_form0_cell_integral_otherwise_12 = form_form0_cell_integral_otherwise + t1[9]; form_form0_cell_integral_otherwise_13 = form_form0_cell_integral_otherwise_11 * form_form0_cell_integral_otherwise_6 + -1.0 * form_form0_cell_integral_otherwise_2 * form_form0_cell_integral_otherwise_9; form_form0_cell_integral_otherwise_14 = form_form0_cell_integral_otherwise_12 * form_form0_cell_integral_otherwise_13 + form_form0_cell_integral_otherwise_0 * form_form0_cell_integral_otherwise_7 + 
form_form0_cell_integral_otherwise_8 * (form_form0_cell_integral_otherwise_10 + -1.0 * form_form0_cell_integral_otherwise_11 * form_form0_cell_integral_otherwise_4); #pragma omp simd for (int n_batch_simd = start + -8 * n_outer; n_batch_simd <= (-8 + end + -8 * n_outer >= 0 ? 7 : -1 + end + -8 * n_outer); ++n_batch_simd) form_form0_cell_integral_otherwise_15[n_batch_simd] = fabs(form_form0_cell_integral_otherwise_14[n_batch_simd]); for (int form_j = 0; form_j <= 3; ++form_j) form_form0_cell_integral_otherwise_22[form_j] = zero_vec_float64; for (int form_ip = 0; form_ip <= 3; ++form_ip) { form_form0_cell_integral_otherwise_18 = zero_vec_float64; for (int form_i = 0; form_i <= 3; ++form_i) form_form0_cell_integral_otherwise_18 = form_form0_cell_integral_otherwise_18 + form_form0_cell_integral_otherwise_17[4 * form_ip + form_i] * t0[form_i]; form_form0_cell_integral_otherwise_19 = form_form0_cell_integral_otherwise_16[form_ip] * form_form0_cell_integral_otherwise_15; form_form0_cell_integral_otherwise_20 = form_form0_cell_integral_otherwise_20 + form_form0_cell_integral_otherwise_19; form_form0_cell_integral_otherwise_21 = form_form0_cell_integral_otherwise_19 * glob0[0] * form_form0_cell_integral_otherwise_18; for (int form_j_0 = 0; form_j_0 <= 3; ++form_j_0) form_form0_cell_integral_otherwise_22[form_j_0] = form_form0_cell_integral_otherwise_22[form_j_0] + form_form0_cell_integral_otherwise_17[4 * form_ip + form_j_0] * form_form0_cell_integral_otherwise_21; } form_form0_cell_integral_otherwise_23 = 1.0 / form_form0_cell_integral_otherwise_14; form_form0_cell_integral_otherwise_24 = form_form0_cell_integral_otherwise_7 * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_25 = -1.0 * t0[0]; form_form0_cell_integral_otherwise_26 = form_form0_cell_integral_otherwise_25 + t0[1]; form_form0_cell_integral_otherwise_27 = (form_form0_cell_integral_otherwise_10 + form_form0_cell_integral_otherwise_11 * -1.0 * form_form0_cell_integral_otherwise_4) * 
form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_28 = form_form0_cell_integral_otherwise_25 + t0[2]; form_form0_cell_integral_otherwise_29 = form_form0_cell_integral_otherwise_13 * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_30 = form_form0_cell_integral_otherwise_25 + t0[3]; form_form0_cell_integral_otherwise_31 = form_form0_cell_integral_otherwise_29 * form_form0_cell_integral_otherwise_30 + form_form0_cell_integral_otherwise_24 * form_form0_cell_integral_otherwise_26 + form_form0_cell_integral_otherwise_27 * form_form0_cell_integral_otherwise_28; form_form0_cell_integral_otherwise_32 = (form_form0_cell_integral_otherwise_12 * form_form0_cell_integral_otherwise_6 + form_form0_cell_integral_otherwise_4 * -1.0 * form_form0_cell_integral_otherwise_8) * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_33 = (form_form0_cell_integral_otherwise_0 * form_form0_cell_integral_otherwise_4 + form_form0_cell_integral_otherwise_9 * -1.0 * form_form0_cell_integral_otherwise_12) * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_34 = (form_form0_cell_integral_otherwise_8 * form_form0_cell_integral_otherwise_9 + -1.0 * form_form0_cell_integral_otherwise_0 * form_form0_cell_integral_otherwise_6) * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_35 = form_form0_cell_integral_otherwise_34 * form_form0_cell_integral_otherwise_30 + form_form0_cell_integral_otherwise_32 * form_form0_cell_integral_otherwise_26 + form_form0_cell_integral_otherwise_33 * form_form0_cell_integral_otherwise_28; form_form0_cell_integral_otherwise_36 = (form_form0_cell_integral_otherwise_8 * form_form0_cell_integral_otherwise_5 + -1.0 * form_form0_cell_integral_otherwise_12 * form_form0_cell_integral_otherwise_2) * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_37 = (form_form0_cell_integral_otherwise_12 * form_form0_cell_integral_otherwise_11 + -1.0 * 
form_form0_cell_integral_otherwise_0 * form_form0_cell_integral_otherwise_5) * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_38 = (form_form0_cell_integral_otherwise_0 * form_form0_cell_integral_otherwise_2 + -1.0 * form_form0_cell_integral_otherwise_8 * form_form0_cell_integral_otherwise_11) * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_39 = form_form0_cell_integral_otherwise_38 * form_form0_cell_integral_otherwise_30 + form_form0_cell_integral_otherwise_36 * form_form0_cell_integral_otherwise_26 + form_form0_cell_integral_otherwise_37 * form_form0_cell_integral_otherwise_28; form_form0_cell_integral_otherwise_40 = (form_form0_cell_integral_otherwise_39 * form_form0_cell_integral_otherwise_38 + form_form0_cell_integral_otherwise_31 * form_form0_cell_integral_otherwise_29 + form_form0_cell_integral_otherwise_35 * form_form0_cell_integral_otherwise_34) * form_form0_cell_integral_otherwise_20; form_form0_cell_integral_otherwise_42 = (form_form0_cell_integral_otherwise_39 * form_form0_cell_integral_otherwise_37 + form_form0_cell_integral_otherwise_31 * form_form0_cell_integral_otherwise_27 + form_form0_cell_integral_otherwise_35 * form_form0_cell_integral_otherwise_33) * form_form0_cell_integral_otherwise_20; form_form0_cell_integral_otherwise_44 = (form_form0_cell_integral_otherwise_39 * form_form0_cell_integral_otherwise_36 + form_form0_cell_integral_otherwise_31 * form_form0_cell_integral_otherwise_24 + form_form0_cell_integral_otherwise_35 * form_form0_cell_integral_otherwise_32) * form_form0_cell_integral_otherwise_20; for (int form_j_1 = 0; form_j_1 <= 3; ++form_j_1) t2[form_j_1] = t2[form_j_1] + form_form0_cell_integral_otherwise_41[form_j_1] * form_form0_cell_integral_otherwise_40 + form_form0_cell_integral_otherwise_43[form_j_1] * form_form0_cell_integral_otherwise_42 + form_form0_cell_integral_otherwise_22[form_j_1] + form_form0_cell_integral_otherwise_45[form_j_1] * 
form_form0_cell_integral_otherwise_44; for (int n_batch = start + -8 * n_outer; n_batch <= (-8 + end + -8 * n_outer >= 0 ? 7 : -1 + end + -8 * n_outer); ++n_batch) { /* no-op (insn=statement4) */ /* */ } for (int i3 = 0; i3 <= 3; ++i3) for (int n_batch = start + -8 * n_outer; n_batch <= (-8 + end + -8 * n_outer >= 0 ? 7 : -1 + end + -8 * n_outer); ++n_batch) dat2[map0[32 * n_outer + 4 * n_batch + i3]] = dat2[map0[32 * n_outer + 4 * n_batch + i3]] + t2[i3][n_batch]; } } { /* bulk slab for 'n_outer' */ /* */ } for (int n_outer = 1 + start + -1 * ((7 + 7 * start) / 8); n_outer <= -2 + (7 + end) / 8; ++n_outer) { for (int i2 = 0; i2 <= 3; ++i2) t2[i2] = zero_vec_float64; for (int i0 = 0; i0 <= 3; ++i0) { for (int i1 = 0; i1 <= 2; ++i1) for (int n_batch = 0; n_batch <= 7; ++n_batch) t1[(3 * i0 + i1)][n_batch] = dat1[3 * map1[32 * n_outer + 4 * n_batch + i0] + i1]; for (int n_batch = 0; n_batch <= 7; ++n_batch) t0[i0][n_batch] = dat0[map0[32 * n_outer + 4 * n_batch + i0]]; } for (int n_batch = 0; n_batch <= 7; ++n_batch) { /* no-op (insn=form__start) */ /* */ } form_form0_cell_integral_otherwise_20 = zero_vec_float64; form_form0_cell_integral_otherwise = -1.0 * t1[0]; form_form0_cell_integral_otherwise_0 = form_form0_cell_integral_otherwise + t1[3]; form_form0_cell_integral_otherwise_1 = -1.0 * t1[1]; form_form0_cell_integral_otherwise_2 = form_form0_cell_integral_otherwise_1 + t1[7]; form_form0_cell_integral_otherwise_3 = -1.0 * t1[2]; form_form0_cell_integral_otherwise_4 = form_form0_cell_integral_otherwise_3 + t1[11]; form_form0_cell_integral_otherwise_5 = form_form0_cell_integral_otherwise_1 + t1[10]; form_form0_cell_integral_otherwise_6 = form_form0_cell_integral_otherwise_3 + t1[8]; form_form0_cell_integral_otherwise_7 = form_form0_cell_integral_otherwise_2 * form_form0_cell_integral_otherwise_4 + -1.0 * form_form0_cell_integral_otherwise_5 * form_form0_cell_integral_otherwise_6; form_form0_cell_integral_otherwise_8 = form_form0_cell_integral_otherwise + t1[6]; 
form_form0_cell_integral_otherwise_9 = form_form0_cell_integral_otherwise_3 + t1[5]; form_form0_cell_integral_otherwise_10 = form_form0_cell_integral_otherwise_5 * form_form0_cell_integral_otherwise_9; form_form0_cell_integral_otherwise_11 = form_form0_cell_integral_otherwise_1 + t1[4]; form_form0_cell_integral_otherwise_12 = form_form0_cell_integral_otherwise + t1[9]; form_form0_cell_integral_otherwise_13 = form_form0_cell_integral_otherwise_11 * form_form0_cell_integral_otherwise_6 + -1.0 * form_form0_cell_integral_otherwise_2 * form_form0_cell_integral_otherwise_9; form_form0_cell_integral_otherwise_14 = form_form0_cell_integral_otherwise_12 * form_form0_cell_integral_otherwise_13 + form_form0_cell_integral_otherwise_0 * form_form0_cell_integral_otherwise_7 + form_form0_cell_integral_otherwise_8 * (form_form0_cell_integral_otherwise_10 + -1.0 * form_form0_cell_integral_otherwise_11 * form_form0_cell_integral_otherwise_4); #pragma omp simd for (int n_batch_simd = 0; n_batch_simd <= 7; ++n_batch_simd) form_form0_cell_integral_otherwise_15[n_batch_simd] = fabs(form_form0_cell_integral_otherwise_14[n_batch_simd]); for (int form_j = 0; form_j <= 3; ++form_j) form_form0_cell_integral_otherwise_22[form_j] = zero_vec_float64; for (int form_ip = 0; form_ip <= 3; ++form_ip) { form_form0_cell_integral_otherwise_18 = zero_vec_float64; for (int form_i = 0; form_i <= 3; ++form_i) form_form0_cell_integral_otherwise_18 = form_form0_cell_integral_otherwise_18 + form_form0_cell_integral_otherwise_17[4 * form_ip + form_i] * t0[form_i]; form_form0_cell_integral_otherwise_19 = form_form0_cell_integral_otherwise_16[form_ip] * form_form0_cell_integral_otherwise_15; form_form0_cell_integral_otherwise_20 = form_form0_cell_integral_otherwise_20 + form_form0_cell_integral_otherwise_19; form_form0_cell_integral_otherwise_21 = form_form0_cell_integral_otherwise_19 * glob0[0] * form_form0_cell_integral_otherwise_18; for (int form_j_0 = 0; form_j_0 <= 3; ++form_j_0) 
form_form0_cell_integral_otherwise_22[form_j_0] = form_form0_cell_integral_otherwise_22[form_j_0] + form_form0_cell_integral_otherwise_17[4 * form_ip + form_j_0] * form_form0_cell_integral_otherwise_21; } form_form0_cell_integral_otherwise_23 = 1.0 / form_form0_cell_integral_otherwise_14; form_form0_cell_integral_otherwise_24 = form_form0_cell_integral_otherwise_7 * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_25 = -1.0 * t0[0]; form_form0_cell_integral_otherwise_26 = form_form0_cell_integral_otherwise_25 + t0[1]; form_form0_cell_integral_otherwise_27 = (form_form0_cell_integral_otherwise_10 + form_form0_cell_integral_otherwise_11 * -1.0 * form_form0_cell_integral_otherwise_4) * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_28 = form_form0_cell_integral_otherwise_25 + t0[2]; form_form0_cell_integral_otherwise_29 = form_form0_cell_integral_otherwise_13 * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_30 = form_form0_cell_integral_otherwise_25 + t0[3]; form_form0_cell_integral_otherwise_31 = form_form0_cell_integral_otherwise_29 * form_form0_cell_integral_otherwise_30 + form_form0_cell_integral_otherwise_24 * form_form0_cell_integral_otherwise_26 + form_form0_cell_integral_otherwise_27 * form_form0_cell_integral_otherwise_28; form_form0_cell_integral_otherwise_32 = (form_form0_cell_integral_otherwise_12 * form_form0_cell_integral_otherwise_6 + form_form0_cell_integral_otherwise_4 * -1.0 * form_form0_cell_integral_otherwise_8) * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_33 = (form_form0_cell_integral_otherwise_0 * form_form0_cell_integral_otherwise_4 + form_form0_cell_integral_otherwise_9 * -1.0 * form_form0_cell_integral_otherwise_12) * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_34 = (form_form0_cell_integral_otherwise_8 * form_form0_cell_integral_otherwise_9 + -1.0 * form_form0_cell_integral_otherwise_0 * 
form_form0_cell_integral_otherwise_6) * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_35 = form_form0_cell_integral_otherwise_34 * form_form0_cell_integral_otherwise_30 + form_form0_cell_integral_otherwise_32 * form_form0_cell_integral_otherwise_26 + form_form0_cell_integral_otherwise_33 * form_form0_cell_integral_otherwise_28; form_form0_cell_integral_otherwise_36 = (form_form0_cell_integral_otherwise_8 * form_form0_cell_integral_otherwise_5 + -1.0 * form_form0_cell_integral_otherwise_12 * form_form0_cell_integral_otherwise_2) * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_37 = (form_form0_cell_integral_otherwise_12 * form_form0_cell_integral_otherwise_11 + -1.0 * form_form0_cell_integral_otherwise_0 * form_form0_cell_integral_otherwise_5) * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_38 = (form_form0_cell_integral_otherwise_0 * form_form0_cell_integral_otherwise_2 + -1.0 * form_form0_cell_integral_otherwise_8 * form_form0_cell_integral_otherwise_11) * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_39 = form_form0_cell_integral_otherwise_38 * form_form0_cell_integral_otherwise_30 + form_form0_cell_integral_otherwise_36 * form_form0_cell_integral_otherwise_26 + form_form0_cell_integral_otherwise_37 * form_form0_cell_integral_otherwise_28; form_form0_cell_integral_otherwise_40 = (form_form0_cell_integral_otherwise_39 * form_form0_cell_integral_otherwise_38 + form_form0_cell_integral_otherwise_31 * form_form0_cell_integral_otherwise_29 + form_form0_cell_integral_otherwise_35 * form_form0_cell_integral_otherwise_34) * form_form0_cell_integral_otherwise_20; form_form0_cell_integral_otherwise_42 = (form_form0_cell_integral_otherwise_39 * form_form0_cell_integral_otherwise_37 + form_form0_cell_integral_otherwise_31 * form_form0_cell_integral_otherwise_27 + form_form0_cell_integral_otherwise_35 * form_form0_cell_integral_otherwise_33) * 
form_form0_cell_integral_otherwise_20; form_form0_cell_integral_otherwise_44 = (form_form0_cell_integral_otherwise_39 * form_form0_cell_integral_otherwise_36 + form_form0_cell_integral_otherwise_31 * form_form0_cell_integral_otherwise_24 + form_form0_cell_integral_otherwise_35 * form_form0_cell_integral_otherwise_32) * form_form0_cell_integral_otherwise_20; for (int form_j_1 = 0; form_j_1 <= 3; ++form_j_1) t2[form_j_1] = t2[form_j_1] + form_form0_cell_integral_otherwise_41[form_j_1] * form_form0_cell_integral_otherwise_40 + form_form0_cell_integral_otherwise_43[form_j_1] * form_form0_cell_integral_otherwise_42 + form_form0_cell_integral_otherwise_22[form_j_1] + form_form0_cell_integral_otherwise_45[form_j_1] * form_form0_cell_integral_otherwise_44; for (int n_batch = 0; n_batch <= 7; ++n_batch) { /* no-op (insn=statement4) */ /* */ } for (int i3 = 0; i3 <= 3; ++i3) for (int n_batch = 0; n_batch <= 7; ++n_batch) dat2[map0[32 * n_outer + 4 * n_batch + i3]] = dat2[map0[32 * n_outer + 4 * n_batch + i3]] + t2[i3][n_batch]; } { /* final slab for 'n_outer' */ /* */ } { int const n_outer = loopy_floor_div_pos_b_int32(-1 + end, 8); if (-1 + 8 * n_outer + -1 * start >= 0) { for (int i2 = 0; i2 <= 3; ++i2) t2[i2] = zero_vec_float64; for (int i0 = 0; i0 <= 3; ++i0) { for (int i1 = 0; i1 <= 2; ++i1) for (int n_batch = 0; n_batch <= -1 + end + -8 * n_outer; ++n_batch) t1[(3 * i0 + i1)][n_batch] = dat1[3 * map1[32 * n_outer + 4 * n_batch + i0] + i1]; for (int n_batch = 0; n_batch <= -1 + end + -8 * n_outer; ++n_batch) t0[i0][n_batch] = dat0[map0[32 * n_outer + 4 * n_batch + i0]]; } for (int n_batch = 0; n_batch <= -1 + end + -8 * n_outer; ++n_batch) { /* no-op (insn=form__start) */ /* */ } form_form0_cell_integral_otherwise_20 = zero_vec_float64; form_form0_cell_integral_otherwise = -1.0 * t1[0]; form_form0_cell_integral_otherwise_0 = form_form0_cell_integral_otherwise + t1[3]; form_form0_cell_integral_otherwise_1 = -1.0 * t1[1]; form_form0_cell_integral_otherwise_2 = 
form_form0_cell_integral_otherwise_1 + t1[7]; form_form0_cell_integral_otherwise_3 = -1.0 * t1[2]; form_form0_cell_integral_otherwise_4 = form_form0_cell_integral_otherwise_3 + t1[11]; form_form0_cell_integral_otherwise_5 = form_form0_cell_integral_otherwise_1 + t1[10]; form_form0_cell_integral_otherwise_6 = form_form0_cell_integral_otherwise_3 + t1[8]; form_form0_cell_integral_otherwise_7 = form_form0_cell_integral_otherwise_2 * form_form0_cell_integral_otherwise_4 + -1.0 * form_form0_cell_integral_otherwise_5 * form_form0_cell_integral_otherwise_6; form_form0_cell_integral_otherwise_8 = form_form0_cell_integral_otherwise + t1[6]; form_form0_cell_integral_otherwise_9 = form_form0_cell_integral_otherwise_3 + t1[5]; form_form0_cell_integral_otherwise_10 = form_form0_cell_integral_otherwise_5 * form_form0_cell_integral_otherwise_9; form_form0_cell_integral_otherwise_11 = form_form0_cell_integral_otherwise_1 + t1[4]; form_form0_cell_integral_otherwise_12 = form_form0_cell_integral_otherwise + t1[9]; form_form0_cell_integral_otherwise_13 = form_form0_cell_integral_otherwise_11 * form_form0_cell_integral_otherwise_6 + -1.0 * form_form0_cell_integral_otherwise_2 * form_form0_cell_integral_otherwise_9; form_form0_cell_integral_otherwise_14 = form_form0_cell_integral_otherwise_12 * form_form0_cell_integral_otherwise_13 + form_form0_cell_integral_otherwise_0 * form_form0_cell_integral_otherwise_7 + form_form0_cell_integral_otherwise_8 * (form_form0_cell_integral_otherwise_10 + -1.0 * form_form0_cell_integral_otherwise_11 * form_form0_cell_integral_otherwise_4); #pragma omp simd for (int n_batch_simd = 0; n_batch_simd <= -1 + end + -8 * n_outer; ++n_batch_simd) form_form0_cell_integral_otherwise_15[n_batch_simd] = fabs(form_form0_cell_integral_otherwise_14[n_batch_simd]); for (int form_j = 0; form_j <= 3; ++form_j) form_form0_cell_integral_otherwise_22[form_j] = zero_vec_float64; for (int form_ip = 0; form_ip <= 3; ++form_ip) { form_form0_cell_integral_otherwise_18 = 
zero_vec_float64; for (int form_i = 0; form_i <= 3; ++form_i) form_form0_cell_integral_otherwise_18 = form_form0_cell_integral_otherwise_18 + form_form0_cell_integral_otherwise_17[4 * form_ip + form_i] * t0[form_i]; form_form0_cell_integral_otherwise_19 = form_form0_cell_integral_otherwise_16[form_ip] * form_form0_cell_integral_otherwise_15; form_form0_cell_integral_otherwise_20 = form_form0_cell_integral_otherwise_20 + form_form0_cell_integral_otherwise_19; form_form0_cell_integral_otherwise_21 = form_form0_cell_integral_otherwise_19 * glob0[0] * form_form0_cell_integral_otherwise_18; for (int form_j_0 = 0; form_j_0 <= 3; ++form_j_0) form_form0_cell_integral_otherwise_22[form_j_0] = form_form0_cell_integral_otherwise_22[form_j_0] + form_form0_cell_integral_otherwise_17[4 * form_ip + form_j_0] * form_form0_cell_integral_otherwise_21; } form_form0_cell_integral_otherwise_23 = 1.0 / form_form0_cell_integral_otherwise_14; form_form0_cell_integral_otherwise_24 = form_form0_cell_integral_otherwise_7 * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_25 = -1.0 * t0[0]; form_form0_cell_integral_otherwise_26 = form_form0_cell_integral_otherwise_25 + t0[1]; form_form0_cell_integral_otherwise_27 = (form_form0_cell_integral_otherwise_10 + form_form0_cell_integral_otherwise_11 * -1.0 * form_form0_cell_integral_otherwise_4) * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_28 = form_form0_cell_integral_otherwise_25 + t0[2]; form_form0_cell_integral_otherwise_29 = form_form0_cell_integral_otherwise_13 * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_30 = form_form0_cell_integral_otherwise_25 + t0[3]; form_form0_cell_integral_otherwise_31 = form_form0_cell_integral_otherwise_29 * form_form0_cell_integral_otherwise_30 + form_form0_cell_integral_otherwise_24 * form_form0_cell_integral_otherwise_26 + form_form0_cell_integral_otherwise_27 * form_form0_cell_integral_otherwise_28; 
form_form0_cell_integral_otherwise_32 = (form_form0_cell_integral_otherwise_12 * form_form0_cell_integral_otherwise_6 + form_form0_cell_integral_otherwise_4 * -1.0 * form_form0_cell_integral_otherwise_8) * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_33 = (form_form0_cell_integral_otherwise_0 * form_form0_cell_integral_otherwise_4 + form_form0_cell_integral_otherwise_9 * -1.0 * form_form0_cell_integral_otherwise_12) * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_34 = (form_form0_cell_integral_otherwise_8 * form_form0_cell_integral_otherwise_9 + -1.0 * form_form0_cell_integral_otherwise_0 * form_form0_cell_integral_otherwise_6) * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_35 = form_form0_cell_integral_otherwise_34 * form_form0_cell_integral_otherwise_30 + form_form0_cell_integral_otherwise_32 * form_form0_cell_integral_otherwise_26 + form_form0_cell_integral_otherwise_33 * form_form0_cell_integral_otherwise_28; form_form0_cell_integral_otherwise_36 = (form_form0_cell_integral_otherwise_8 * form_form0_cell_integral_otherwise_5 + -1.0 * form_form0_cell_integral_otherwise_12 * form_form0_cell_integral_otherwise_2) * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_37 = (form_form0_cell_integral_otherwise_12 * form_form0_cell_integral_otherwise_11 + -1.0 * form_form0_cell_integral_otherwise_0 * form_form0_cell_integral_otherwise_5) * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_38 = (form_form0_cell_integral_otherwise_0 * form_form0_cell_integral_otherwise_2 + -1.0 * form_form0_cell_integral_otherwise_8 * form_form0_cell_integral_otherwise_11) * form_form0_cell_integral_otherwise_23; form_form0_cell_integral_otherwise_39 = form_form0_cell_integral_otherwise_38 * form_form0_cell_integral_otherwise_30 + form_form0_cell_integral_otherwise_36 * form_form0_cell_integral_otherwise_26 + form_form0_cell_integral_otherwise_37 * 
form_form0_cell_integral_otherwise_28; form_form0_cell_integral_otherwise_40 = (form_form0_cell_integral_otherwise_39 * form_form0_cell_integral_otherwise_38 + form_form0_cell_integral_otherwise_31 * form_form0_cell_integral_otherwise_29 + form_form0_cell_integral_otherwise_35 * form_form0_cell_integral_otherwise_34) * form_form0_cell_integral_otherwise_20; form_form0_cell_integral_otherwise_42 = (form_form0_cell_integral_otherwise_39 * form_form0_cell_integral_otherwise_37 + form_form0_cell_integral_otherwise_31 * form_form0_cell_integral_otherwise_27 + form_form0_cell_integral_otherwise_35 * form_form0_cell_integral_otherwise_33) * form_form0_cell_integral_otherwise_20; form_form0_cell_integral_otherwise_44 = (form_form0_cell_integral_otherwise_39 * form_form0_cell_integral_otherwise_36 + form_form0_cell_integral_otherwise_31 * form_form0_cell_integral_otherwise_24 + form_form0_cell_integral_otherwise_35 * form_form0_cell_integral_otherwise_32) * form_form0_cell_integral_otherwise_20; for (int form_j_1 = 0; form_j_1 <= 3; ++form_j_1) t2[form_j_1] = t2[form_j_1] + form_form0_cell_integral_otherwise_41[form_j_1] * form_form0_cell_integral_otherwise_40 + form_form0_cell_integral_otherwise_43[form_j_1] * form_form0_cell_integral_otherwise_42 + form_form0_cell_integral_otherwise_22[form_j_1] + form_form0_cell_integral_otherwise_45[form_j_1] * form_form0_cell_integral_otherwise_44; for (int n_batch = 0; n_batch <= -1 + end + -8 * n_outer; ++n_batch) { /* no-op (insn=statement4) */ /* */ } for (int i3 = 0; i3 <= 3; ++i3) for (int n_batch = 0; n_batch <= -1 + end + -8 * n_outer; ++n_batch) dat2[map0[32 * n_outer + 4 * n_batch + i3]] = dat2[map0[32 * n_outer + 4 * n_batch + i3]] + t2[i3][n_batch]; } } }
saxpy.h
#pragma once // lab02-openmp-saxpy. Puede usar guanaco siempre que este disponible. // MORALEJA: en CPU multicore, conviene asignar segmentos continuos de memoria a los threads. // => localidad < t0 > < t1 > .... < tn > // // BUENO EN CPU // x1 x2 x3 x4 x5 x6 x7 x8 x9 // t1 t1 t1 t2 t2 t2 t3 t3 t3 // RAZON DE FONDO: **coherencia de cache** con segmentos grandes --> BIEN // " segmmentos pequenos (cs=1) --> FALSE SHARING // // MALO EN CPU --> FALSE SHARING // | x1 x2 x3 | x4 x5 x6 | x7 x8 x9 | // t1 t2 t3 t1 t2 t3 t1 t2 t3 // // BLOQUE DE PALABRAS=3 (ejemplo) // t1 lee x1,x2,x3 y lo pone en cache de su nucleo CPU (L1, cache por nucleo) // t2 lee x1,x2,x3 (lee un bloque de palabras). // t3 lee x1,x2,x3 (lee un bloque de palabras). // compartiendo elementos en sus lineas de cache // // saxpy es un ejemplo HPC sencillo ==> Hola Mundo de HPC // SAXPY => S = aX + Y S, X, Y vectores de n dimensiones // SAXPY se usa porque es sencillo, como hola mundo, pero no explota la CPU al 100%. // SAXPY es de los ejemplos mas sencillos que ofrece gran paralelismo de datos. // (1) haga un programa saxpy y mida el tiempo del calculo // (2) introduzca paralelismo con OpenMP, de distintas formas // a) funcion saxpy1 con parallel for // b) funcion saxpy2 con parallel for y chunksize = 1 // c) funcion saxpy3 con omp parallel manual con particion segmentos continuos // d) funcion saxpy4 con omp parallel manual con particion segmentos intercalados // (3) experimente comparando el resultado de cada metodo a distintos n // (4) Hacer un grafico tiempo vs n, usando todos los cores de la CPU // (5) Hacer un grafico tiempo vs nt, fijando el problema en n = 10^8 // (6) saque conclusiones sobre el rendimiento obtenido en base a ambos graficos. 
/*
 * SAXPY (s = a*x + y) variants used to compare OpenMP work-partitioning
 * strategies (lab02-openmp-saxpy).
 *
 * Review fixes:
 *  - the header now includes what it uses (<stdio.h> and the OpenMP API),
 *    so it is self-contained;
 *  - saxpy3/saxpy4 request num_threads(nt) on their parallel regions: the
 *    manual partition assumes exactly nt workers, and with a smaller team
 *    part of the vector was silently left unprocessed.
 */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
/* Serial fallback so this header still builds and runs (single-threaded)
 * when compiled without -fopenmp; the manual variants then behave as nt=1. */
static inline int omp_get_thread_num(void) { return 0; }
#endif

/* Initialize a[i] = c*i for i in [0, n). */
void init_vec(int *a, int n, int c){
    #pragma omp parallel for
    for(int i = 0; i < n; ++i){
        a[i] = c * i;
    }
}

/* (1) parallel for, default (static) schedule: each thread gets one large
 * contiguous chunk -> good cache locality, no false sharing.
 * `cs` is accepted only for signature uniformity with saxpy2; the runtime
 * picks the chunk size (roughly n/nt) itself. */
void saxpy1(int *s, int *x, int *y, int n, int a, int cs){
    (void)cs;  /* unused by design -- see comment above */
    #pragma omp parallel for
    for(int i = 0; i < n; ++i){
        s[i] = a * x[i] + y[i];
    }
}

/* (2) parallel for with schedule(static, cs).  With cs == 1 iterations are
 * interleaved across threads, so neighboring elements of s[] are written by
 * different cores sharing a cache line -> false sharing, poor performance. */
void saxpy2(int *s, int *x, int *y, int n, int a, int cs){
    #pragma omp parallel for schedule(static, cs)
    for(int i = 0; i < n; ++i){
        s[i] = a * x[i] + y[i];
    }
}

/* (3) manual partition into contiguous segments:
 *        x1 x2 x3 | x4 x5 x6 | x7 x8 x9
 *           t0         t1         t2
 * No race conditions: each thread writes a disjoint slice of s; x, y, n, a,
 * nt are read-only.  num_threads(nt) guarantees the team size matches the
 * partition; without it a smaller team would skip trailing elements. */
void saxpy3(int *s, int *x, int *y, int n, int a, int nt){
    #pragma omp parallel num_threads(nt) shared(s, x, y, n, a, nt)
    {
        int tid = omp_get_thread_num();
        /* ceil(n / nt): elements assigned to each thread */
        int subsize = (n + nt - 1) / nt;
        /* first element owned by this thread */
        int start = subsize * tid;
        printf("thread %i start %i subsize%i\n", tid, start, subsize);
        for(int i = start; i < start + subsize && i < n; ++i){
            s[i] = a * x[i] + y[i];
        }
    }
}

/* (4) manual partition with interleaved (round-robin) accesses:
 *        x1 x2 x3 x4 x5 x6 x7 x8 x9
 *        t0 t1 t2 t0 t1 t2 t0 t1 t2
 * Still race-free (disjoint index sets), but adjacent writes land on the
 * same cache lines -> demonstrates false sharing on multicore CPUs. */
void saxpy4(int *s, int *x, int *y, int n, int a, int nt){
    #pragma omp parallel num_threads(nt) shared(s, x, y, n, a, nt)
    {
        int tid = omp_get_thread_num();
        for(int i = tid; i < n; i += nt){
            s[i] = a * x[i] + y[i];
        }
    }
}

/* Print up to 32 elements of a[]; larger vectors are skipped so benchmark
 * runs are not flooded with output. */
void print_vec(int *a, int n, const char *msg){
    if(n > 32){
        return;
    }
    printf("%s\n[", msg);
    for(int i = 0; i < n; ++i){
        printf("%i ", a[i]);
    }
    printf("]\n");
}
util.h
/*******************************************************************************
 * Copyright 2018 Tensor Tang. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *******************************************************************************/

/**
 * This file defines some utilities that do not depends on jitinfer itself
 */
#pragma once
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "omp_thread.h"

#ifdef WIN32
#include <malloc.h>
#include <windows.h>
#endif

namespace jitinfer {
namespace util {

// Product of the first `num` entries of `p`, accumulated as size_t
// (e.g. the element count of a tensor given its dims).
template <typename T>
inline size_t array_product(const T *p, size_t num) {
  size_t out = 1;
  for (size_t i = 0; i < num; ++i) {
    out *= size_t(p[i]);
  }
  return out;
}

// Base case of one_of: single candidate.
template <typename T, typename P>
inline bool one_of(T val, P item) {
  return val == item;
}

// True when `val` equals any of the listed candidates (variadic recursion).
template <typename T, typename P, typename... Args>
inline bool one_of(T val, P item, Args... item_others) {
  return val == item || one_of(val, item_others...);
}

// Base case of all_true: single expression.
template <typename T>
inline bool all_true(T expr) {
  return expr;
}

// True when every expression in the pack is truthy.
template <typename T, typename... Args>
inline bool all_true(T expr, Args... others_expr) {
  return expr && all_true(others_expr...);
}

// Returns `divisor` if it divides `val` exactly, otherwise 1.
inline int dividable_of(int val, int divisor) {
  if (val % divisor == 0) {
    return divisor;
  } else {
    return 1;
  }
}

// Returns the first listed divisor that divides `val` exactly; falls back
// to the two-argument overload (and ultimately 1) when none does.
template <typename... Args>
inline int dividable_of(int val, int divisor, Args... others_divisor) {
  if (val % divisor == 0) {
    return divisor;
  } else {
    return dividable_of(val, others_divisor...);
  }
}

// Largest value <= `divisor` that divides `val` exactly, searching downward;
// clamps to `val` when divisor > val, and to 1 when divisor <= 1.
inline int find_dividable(int val, int divisor) {
  if (divisor <= 1) {
    return 1;
  }
  if (divisor > val) {
    return val;
  }
  if (val % divisor == 0) {
    return divisor;
  } else {
    return find_dividable(val, divisor - 1);
  }
}

// Element-wise parallel copy of `sz` elements from src to dst.
// NOTE(review): `#pragma omp parallel for` over an unsigned (size_t) loop
// index requires OpenMP 3.0+ -- confirm the minimum supported toolchain.
template <typename T>
inline void copy_array(T *dst, const T *src, size_t sz) {
  // do not use memcpy, in case of memory aligment
#pragma omp parallel for schedule(static)
  for (size_t i = 0; i < sz; ++i) {
    dst[i] = src[i];
  }
}

// Fill `arr[0..size)` with `val` cast to T, in parallel.
template <typename T, typename U>
inline void set_array(T *arr, const U &val, size_t size) {
#pragma omp parallel for schedule(static)
  for (size_t i = 0; i < size; ++i) {
    arr[i] = static_cast<T>(val);
  }
}

// Minimal local re-implementations of std::remove_reference / std::forward,
// presumably kept here to avoid pulling in <utility> -- TODO confirm.
template <typename T>
struct remove_reference {
  typedef T type;
};
template <typename T>
struct remove_reference<T &> {
  typedef T type;
};
template <typename T>
struct remove_reference<T &&> {
  typedef T type;
};

// Ceiling division: (a + b - 1) / b; asserts b is nonzero.
template <typename T, typename U>
inline typename remove_reference<T>::type div_up(const T a, const U b) {
  assert(b);
  return (a + b - 1) / b;
}

// Perfect-forwarding helper (lvalue overload).
template <typename T>
inline T &&forward(typename remove_reference<T>::type &t) {
  return static_cast<T &&>(t);
}

// Perfect-forwarding helper (rvalue overload).
template <typename T>
inline T &&forward(typename remove_reference<T>::type &&t) {
  return static_cast<T &&>(t);
}

// Value-initialized ("zero") instance of the decayed type.
template <typename T>
inline typename remove_reference<T>::type zero() {
  auto zero = typename remove_reference<T>::type();
  return zero;
}

// divide jobs on workers
// for example 4 jobs to 3 worker get 2,1,1
// On return [n_start, n_end) is the half-open job range for worker `tid`
// out of `team` workers; chunk sizes differ by at most one.
template <typename T, typename U>
inline void balance211(T n, U team, U tid, T &n_start, T &n_end) {
  T n_min = 1;
  T &n_my = n_end;  // alias: first holds this worker's count, then the end
  if (team <= 1 || n == 0) {
    n_start = 0;
    n_my = n;
  } else if (n_min == 1) {
    // team = T1 + T2
    // n = T1*n1 + T2*n2  (n1 - n2 = 1)
    T n1 = div_up(n, (T)team);
    T n2 = n1 - 1;
    T T1 = n - n2 * (T)team;
    n_my = (T)tid < T1 ? n1 : n2;
    n_start = (T)tid <= T1 ? tid * n1 : T1 * n1 + ((T)tid - T1) * n2;
  }
  n_end += n_start;  // convert count to exclusive end index
}

// Base case: nd_iterator_init with no remaining (index, extent) pairs.
template <typename T>
inline T nd_iterator_init(T start) {
  return start;
}

// Decompose linear index `start` into per-dimension indices: each (x, X)
// pair receives start % X for the innermost dimension processed last.
template <typename T, typename U, typename W, typename... Args>
inline T nd_iterator_init(T start, U &x, const W &X, Args &&... tuple) {
  start = nd_iterator_init(start, forward<Args>(tuple)...);
  x = start % X;
  return start / X;
}

// Base case for the multi-dimensional increment below.
inline bool nd_iterator_step() { return true; }

// Odometer-style increment over (index, extent) pairs; returns true when
// the whole iteration space wrapped around.
template <typename U, typename W, typename... Args>
inline bool nd_iterator_step(U &x, const W &X, Args &&... tuple) {
  if (nd_iterator_step(forward<Args>(tuple)...)) {
    x = (x + 1) % X;
    return x == 0;
  }
  return false;
}

// Advance the iterator by up to (end - cur) positions in one dimension;
// returns true when this dimension wrapped (carry to the next one).
template <typename U, typename W, typename Y>
inline bool nd_iterator_jump(U &cur, const U end, W &x, const Y &X) {
  U max_jump = end - cur;
  U dim_jump = X - x;
  if (dim_jump <= max_jump) {
    x = 0;
    cur += dim_jump;
    return true;
  } else {
    cur += max_jump;
    x += max_jump;
    return false;
  }
}

// Variadic carry propagation for nd_iterator_jump.
template <typename U, typename W, typename Y, typename... Args>
inline bool nd_iterator_jump(
    U &cur, const U end, W &x, const Y &X, Args &&... tuple) {
  if (nd_iterator_jump(cur, end, forward<Args>(tuple)...)) {
    x = (x + 1) % X;
    return x == 0;
  }
  return false;
}

namespace timer {
// Wall-clock time in milliseconds (gettimeofday-based, microsecond input).
inline double get_current_ms() {
  struct timeval time;
  gettimeofday(&time, NULL);
  return 1e+3 * time.tv_sec + 1e-3 * time.tv_usec;
};
}

namespace env {
// Declarations only; definitions live elsewhere in the project.
int _getenv(char *value, const char *name, int length);
bool profiling_time();
bool jit_dump_code();
}
}

// Aligned allocation pair; jitinfer::free shadows ::free on purpose so that
// aligned_malloc'ed blocks are released through the matching routine.
void *aligned_malloc(size_t size, int alignment);
void free(void *p);
}
GB_unop__identity_bool_uint8.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_bool_uint8)
// op(A') function:  GB (_unop_tran__identity_bool_uint8)

// C type:   bool
// A type:   uint8_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting (uint8_t -> bool: any nonzero value becomes true)
#define GB_CAST(z, aij) \
    bool z = (bool) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    bool z = (bool) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_bool_uint8)
(
    bool *Cx,               // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            uint8_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_bool_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose loop body is shared with all unary ops via this template
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
draw.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD RRRR AAA W W % % D D R R A A W W % % D D RRRR AAAAA W W W % % D D R RN A A WW WW % % DDDD R R A A W W % % % % % % MagickCore Image Drawing Methods % % % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon % rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion", % Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent % (www.appligent.com) contributed the dash pattern, linecap stroking % algorithm, and minor rendering improvements. % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Define declarations. */ #define BezierQuantum 200 #define DrawEpsilon (1.0e-10) /* Typedef declarations. 
*/

/*
  One monotonic polygon edge produced by ConvertPathToPolygon(): a run of
  points sorted by increasing y, plus scanline-rendering bookkeeping.
*/
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;           /* bounding box of this edge's points */

  double
    scanline;         /* last scanline on which this edge was active */

  PointInfo
    *points;          /* vertex list, ordered by increasing y */

  size_t
    number_points;

  ssize_t
    direction;        /* 1 if the original path ran downward, else 0 */

  MagickBooleanType
    ghostline;        /* implicit closing edge: contributes to fill only */

  size_t
    highwater;        /* index hint: first point that can still intersect */
} EdgeInfo;

/* Ellipse/arc element: center, semi-axes, and rotation angle. */
typedef struct _ElementInfo
{
  double
    cx,
    cy,
    major,
    minor,
    angle;
} ElementInfo;

/* Sorted-edge form of a path, ready for scanline rasterization. */
typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;

  size_t
    number_edges;
} PolygonInfo;

/* Vector-path opcodes emitted by ConvertPrimitiveToPath(). */
typedef enum
{
  MoveToCode,
  OpenCode,       /* moveto of a subpath that is not explicitly closed */
  GhostlineCode,  /* moveto of an implicit (non-stroked) closing line */
  LineToCode,
  EndCode
} PathInfoCode;

typedef struct _PathInfo
{
  PointInfo
    point;

  PathInfoCode
    code;
} PathInfo;

/*
  Forward declarations.
*/
static MagickBooleanType
  DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
    ExceptionInfo *);

static PrimitiveInfo
  *TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *);

static size_t
  TracePath(PrimitiveInfo *,const char *);

static void
  TraceArc(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo),
  TraceArcPath(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo,
    const double,const MagickBooleanType,const MagickBooleanType),
  TraceBezier(PrimitiveInfo *,const size_t),
  TraceCircle(PrimitiveInfo *,const PointInfo,const PointInfo),
  TraceEllipse(PrimitiveInfo *,const PointInfo,const PointInfo,
    const PointInfo),
  TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
  TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
  TraceRoundRectangle(PrimitiveInfo *,const PointInfo,const PointInfo,
    PointInfo),
  TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e D r a w I n f o                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireDrawInfo() returns a DrawInfo structure properly initialized.
% % The format of the AcquireDrawInfo method is: % % DrawInfo *AcquireDrawInfo(void) % */ MagickExport DrawInfo *AcquireDrawInfo(void) { DrawInfo *draw_info; draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info)); GetDrawInfo((ImageInfo *) NULL,draw_info); return(draw_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneDrawInfo() makes a copy of the given draw_info structure. If NULL % is specified, a new DrawInfo structure is created initialized to default % values. % % The format of the CloneDrawInfo method is: % % DrawInfo *CloneDrawInfo(const ImageInfo *image_info, % const DrawInfo *draw_info) % % A description of each parameter follows: % % o image_info: the image info. % % o draw_info: the draw info. % */ MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info, const DrawInfo *draw_info) { DrawInfo *clone_info; ExceptionInfo *exception; clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info)); GetDrawInfo(image_info,clone_info); if (draw_info == (DrawInfo *) NULL) return(clone_info); exception=AcquireExceptionInfo(); if (clone_info->primitive != (char *) NULL) (void) CloneString(&clone_info->primitive,draw_info->primitive); if (draw_info->geometry != (char *) NULL) (void) CloneString(&clone_info->geometry,draw_info->geometry); clone_info->viewbox=draw_info->viewbox; clone_info->affine=draw_info->affine; clone_info->gravity=draw_info->gravity; clone_info->fill=draw_info->fill; clone_info->stroke=draw_info->stroke; clone_info->stroke_width=draw_info->stroke_width; if (draw_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue, exception); if (draw_info->stroke_pattern != (Image *) NULL) clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0, MagickTrue,exception); 
clone_info->stroke_antialias=draw_info->stroke_antialias; clone_info->text_antialias=draw_info->text_antialias; clone_info->fill_rule=draw_info->fill_rule; clone_info->linecap=draw_info->linecap; clone_info->linejoin=draw_info->linejoin; clone_info->miterlimit=draw_info->miterlimit; clone_info->dash_offset=draw_info->dash_offset; clone_info->decorate=draw_info->decorate; clone_info->compose=draw_info->compose; if (draw_info->text != (char *) NULL) (void) CloneString(&clone_info->text,draw_info->text); if (draw_info->font != (char *) NULL) (void) CloneString(&clone_info->font,draw_info->font); if (draw_info->metrics != (char *) NULL) (void) CloneString(&clone_info->metrics,draw_info->metrics); if (draw_info->family != (char *) NULL) (void) CloneString(&clone_info->family,draw_info->family); clone_info->style=draw_info->style; clone_info->stretch=draw_info->stretch; clone_info->weight=draw_info->weight; if (draw_info->encoding != (char *) NULL) (void) CloneString(&clone_info->encoding,draw_info->encoding); clone_info->pointsize=draw_info->pointsize; clone_info->kerning=draw_info->kerning; clone_info->interline_spacing=draw_info->interline_spacing; clone_info->interword_spacing=draw_info->interword_spacing; clone_info->direction=draw_info->direction; if (draw_info->density != (char *) NULL) (void) CloneString(&clone_info->density,draw_info->density); clone_info->align=draw_info->align; clone_info->undercolor=draw_info->undercolor; clone_info->border_color=draw_info->border_color; if (draw_info->server_name != (char *) NULL) (void) CloneString(&clone_info->server_name,draw_info->server_name); if (draw_info->dash_pattern != (double *) NULL) { register ssize_t x; for (x=0; fabs(draw_info->dash_pattern[x]) >= DrawEpsilon; x++) ; clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) x+1UL, sizeof(*clone_info->dash_pattern)); if (clone_info->dash_pattern == (double *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void) 
CopyMagickMemory(clone_info->dash_pattern,draw_info->dash_pattern, (size_t) (x+1)*sizeof(*clone_info->dash_pattern)); } clone_info->gradient=draw_info->gradient; if (draw_info->gradient.stops != (StopInfo *) NULL) { size_t number_stops; number_stops=clone_info->gradient.number_stops; clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t) number_stops,sizeof(*clone_info->gradient.stops)); if (clone_info->gradient.stops == (StopInfo *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void) CopyMagickMemory(clone_info->gradient.stops, draw_info->gradient.stops,(size_t) number_stops* sizeof(*clone_info->gradient.stops)); } if (draw_info->clip_mask != (char *) NULL) (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask); clone_info->bounds=draw_info->bounds; clone_info->clip_units=draw_info->clip_units; clone_info->render=draw_info->render; clone_info->fill_alpha=draw_info->fill_alpha; clone_info->stroke_alpha=draw_info->stroke_alpha; clone_info->element_reference=draw_info->element_reference; clone_info->debug=IsEventLogging(); exception=DestroyExceptionInfo(exception); return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n v e r t P a t h T o P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvertPathToPolygon() converts a path to the more efficient sorted % rendering form. % % The format of the ConvertPathToPolygon method is: % % PolygonInfo *ConvertPathToPolygon(const DrawInfo *draw_info, % const PathInfo *path_info) % % A description of each parameter follows: % % o Method ConvertPathToPolygon returns the path in a more efficient sorted % rendering form of type PolygonInfo. % % o draw_info: Specifies a pointer to an DrawInfo structure. % % o path_info: Specifies a pointer to an PathInfo structure. 
%
*/

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  qsort() comparator: order edges by smallest starting y, then starting x,
  then by the cross product of the two leading segments (which edge slopes
  further right).  Never returns 0, so ties get a stable-ish arbitrary order.
*/
static int CompareEdges(const void *x,const void *y)
{
  register const EdgeInfo
    *p,
    *q;

  /*
    Compare two edges.
  */
  p=(const EdgeInfo *) x;
  q=(const EdgeInfo *) y;
  if ((p->points[0].y-DrawEpsilon) > q->points[0].y)
    return(1);
  if ((p->points[0].y+DrawEpsilon) < q->points[0].y)
    return(-1);
  if ((p->points[0].x-DrawEpsilon) > q->points[0].x)
    return(1);
  if ((p->points[0].x+DrawEpsilon) < q->points[0].x)
    return(-1);
  if (((p->points[1].x-p->points[0].x)*(q->points[1].y-q->points[0].y)-
    (p->points[1].y-p->points[0].y)*(q->points[1].x-q->points[0].x)) > 0.0)
    return(1);
  return(-1);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

/* Dump the polygon's edge list to the debug log. */
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
  register EdgeInfo
    *p;

  register ssize_t
    i,
    j;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
  p=polygon_info->edges;
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
      (double) i);
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
      p->direction != MagickFalse ? "down" : "up");
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
      p->ghostline != MagickFalse ? "transparent" : "opaque");
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
      p->bounds.x2,p->bounds.y2);
    for (j=0; j < (ssize_t) p->number_points; j++)
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
        p->points[j].x,p->points[j].y);
    p++;
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}

/* Reverse a point list in place (used to make every edge run downward). */
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  PointInfo
    point;

  register ssize_t
    i;

  for (i=0; i < (ssize_t) (number_points >> 1); i++)
  {
    point=points[i];
    points[i]=points[number_points-(i+1)];
    points[number_points-(i+1)]=point;
  }
}

/*
  Split the path into y-monotonic edges: a new edge starts at every moveto
  and whenever the path's vertical direction reverses.  Upward edges are
  reversed so all stored edges run top-to-bottom, then the edge list is
  sorted for scanline rendering.
  NOTE(review): on allocation failure this returns NULL without releasing
  polygon_info or the edges accumulated so far -- leak, confirm against the
  caller's recovery expectations.
*/
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
  long
    direction,
    next_direction;

  PointInfo
    point,
    *points;

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;

  register ssize_t
    i,
    n;

  MagickBooleanType
    ghostline;

  size_t
    edge,
    number_edges,
    number_points;

  /*
    Convert a path to the more efficient sorted rendering form.
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    return((PolygonInfo *) NULL);
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    return((PolygonInfo *) NULL);
  (void) ResetMagickMemory(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) ResetMagickMemory(&point,0,sizeof(point));
  (void) ResetMagickMemory(&bounds,0,sizeof(bounds));
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to: flush the pending edge (if any) and start a new one.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            if (edge == number_edges)
              {
                /* Grow the edge array geometrically. */
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  return((PolygonInfo *) NULL);
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            if (direction < 0)
              ReversePoints(points,(size_t) n);
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;
            ghostline=MagickFalse;
            edge++;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to: detect a vertical direction reversal, which terminates the
      current monotonic edge.
    */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < DrawEpsilon) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge: flush the current one, seeding the next with its last
          point so the edges share a vertex.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
      }
    point=path_info[i].point;
    points[n]=point;
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      /* Flush the final edge; a single point is not an edge, drop it. */
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                return((PolygonInfo *) NULL);
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          ghostline=MagickFalse;
          edge++;
        }
    }
  polygon_info->number_edges=edge;
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),CompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   C o n v e r t P r i m i t i v e T o P a t h                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
%  path structure.
%
%  The format of the ConvertPrimitiveToPath method is:
%
%      PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info)
%
%  A description of each parameter follows:
%
%    o Method ConvertPrimitiveToPath returns a vector path structure of type
%      PathInfo.
%
%    o draw_info: a structure of type DrawInfo.
%
%    o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
*/

/* Dump the vector path to the debug log. */
static void LogPathInfo(const PathInfo *path_info)
{
  register const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
      "moveto ghostline" : p->code == OpenCode ? "moveto open" :
      p->code == MoveToCode ? "moveto" : p->code == LineToCode ?
      "lineto" : "?");
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}

/*
  Translate the primitive's coordinate runs into moveto/lineto opcodes,
  dropping consecutive duplicate points and ghost-closing any subpath whose
  last point does not return to its start.
*/
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,          /* start point of the current subpath */
    q;          /* most recently emitted point */

  register ssize_t
    i,
    n;

  ssize_t
    coordinates,
    start;      /* index of the current subpath's moveto in path_info */

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    /* These primitives have no path representation. */
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /* Worst case: every point plus a ghostline pair plus the end marker. */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (2UL*i+3UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /* First point of a new coordinate run: emit a moveto. */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
      }
    coordinates--;
    /*
      Eliminate duplicate points.
    */
    if ((i == 0) || (fabs(q.x-primitive_info[i].point.x) >= DrawEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= DrawEpsilon))
      {
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;
    if ((fabs(p.x-primitive_info[i].point.x) < DrawEpsilon) &&
        (fabs(p.y-primitive_info[i].point.y) < DrawEpsilon))
      continue;
    /*
      Mark the p point as open if it does not match the q.
*/ path_info[start].code=OpenCode; path_info[n].code=GhostlineCode; path_info[n].point=primitive_info[i].point; n++; path_info[n].code=LineToCode; path_info[n].point=p; n++; } path_info[n].code=EndCode; path_info[n].point.x=0.0; path_info[n].point.y=0.0; if (IsEventLogging() != MagickFalse) LogPathInfo(path_info); return(path_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyDrawInfo() deallocates memory associated with an DrawInfo % structure. % % The format of the DestroyDrawInfo method is: % % DrawInfo *DestroyDrawInfo(DrawInfo *draw_info) % % A description of each parameter follows: % % o draw_info: the draw info. % */ MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info) { if (draw_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (draw_info->primitive != (char *) NULL) draw_info->primitive=DestroyString(draw_info->primitive); if (draw_info->text != (char *) NULL) draw_info->text=DestroyString(draw_info->text); if (draw_info->geometry != (char *) NULL) draw_info->geometry=DestroyString(draw_info->geometry); if (draw_info->fill_pattern != (Image *) NULL) draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern); if (draw_info->stroke_pattern != (Image *) NULL) draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern); if (draw_info->font != (char *) NULL) draw_info->font=DestroyString(draw_info->font); if (draw_info->metrics != (char *) NULL) draw_info->metrics=DestroyString(draw_info->metrics); if (draw_info->family != (char *) NULL) draw_info->family=DestroyString(draw_info->family); if (draw_info->encoding != (char *) NULL) draw_info->encoding=DestroyString(draw_info->encoding); if (draw_info->density != (char *) NULL) 
draw_info->density=DestroyString(draw_info->density); if (draw_info->server_name != (char *) NULL) draw_info->server_name=(char *) RelinquishMagickMemory(draw_info->server_name); if (draw_info->dash_pattern != (double *) NULL) draw_info->dash_pattern=(double *) RelinquishMagickMemory( draw_info->dash_pattern); if (draw_info->gradient.stops != (StopInfo *) NULL) draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory( draw_info->gradient.stops); if (draw_info->clip_mask != (char *) NULL) draw_info->clip_mask=DestroyString(draw_info->clip_mask); draw_info->signature=(~MagickCoreSignature); draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info); return(draw_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y E d g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyEdge() destroys the specified polygon edge. % % The format of the DestroyEdge method is: % % ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge) % % A description of each parameter follows: % % o polygon_info: Specifies a pointer to an PolygonInfo structure. % % o edge: the polygon edge number to destroy. % */ static size_t DestroyEdge(PolygonInfo *polygon_info, const size_t edge) { assert(edge < polygon_info->number_edges); polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory( polygon_info->edges[edge].points); polygon_info->number_edges--; if (edge < polygon_info->number_edges) (void) CopyMagickMemory(polygon_info->edges+edge,polygon_info->edges+edge+1, (size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges)); return(polygon_info->number_edges); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P o l y g o n I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPolygonInfo() destroys the PolygonInfo data structure. 
%
%  The format of the DestroyPolygonInfo method is:
%
%      PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
%  A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  register ssize_t
    i;

  /* Free each edge's point list, then the edge array, then the struct. */
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
    polygon_info->edges[i].points=(PointInfo *)
      RelinquishMagickMemory(polygon_info->edges[i].points);
  polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges);
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w A f f i n e I m a g e                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawAffineImage() composites the source over the destination image as
%  dictated by the affine transform.
%
%  The format of the DrawAffineImage method is:
%
%      MagickBooleanType DrawAffineImage(Image *image,const Image *source,
%        const AffineMatrix *affine,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o source: the source image.
%
%    o affine: the affine transform.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Clip the scanline segment *edge at height y against the source image's
  area under the given (inverse) affine transform, returning the clipped
  segment.  An empty result is signalled by x2 < x1.
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  register double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  z=affine->ry*y+affine->tx;
  if (affine->sx >= DrawEpsilon)
    {
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -DrawEpsilon)
      {
        /* Negative x scale: the two intercepts swap roles. */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          /* Degenerate x scale and z outside the image: empty segment. */
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= DrawEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -DrawEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}

/*
  Invert a 2x3 affine matrix.  PerceptibleReciprocal() guards against a
  (near-)zero determinant, so a singular input yields a huge-but-finite
  inverse rather than a division by zero.
*/
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    determinant;

  determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
    affine->ry);
  inverse_affine.sx=determinant*affine->sy;
  inverse_affine.rx=determinant*(-affine->rx);
  inverse_affine.ry=determinant*(-affine->ry);
  inverse_affine.sy=determinant*affine->sx;
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}

MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const
  Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],
    min,
    max;

  register ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box: forward-transform the source's four corners and
    take the min/max of the results.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;

    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image: for each destination scanline in the (clipped)
    bounding box, map pixels back through the inverse transform, sample the
    source, and composite over the destination.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    ssize_t
      x_offset;

    /* Clip this scanline to the area covered by the transformed source. */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    x_offset=0;  /* NOTE(review): incremented but never read -- dead local */
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t)
      floor(inverse_edge.x2+0.5); x++)
    {
      /* Inverse-map the destination pixel into source coordinates. */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      (void) InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      x_offset++;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   D r a w B o u n d i n g R e c t a n g l e s                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawBoundingRectangles() draws the bounding rectangles on the image.  This
%  is only useful for developers debugging the rendering algorithm.
%
%  The format of the DrawBoundingRectangles method is:
%
%      void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info,
%        PolygonInfo *polygon_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info,
  const PolygonInfo *polygon_info,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  double
    mid;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  /* Default to 96 DPI unless the draw info carries a density. */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  /* Half the stroke width, scaled by resolution and the affine transform. */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    clone_info->stroke_width/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /* Union of all edge bounds, padded by mid and clamped to the image. */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      /* Red rectangle per downward edge, green per upward edge. */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          (void) QueryColorCompliance("red",AllCompliance,&clone_info->stroke,
            exception);
        else
          (void) QueryColorCompliance("green",AllCompliance,&clone_info->stroke,
            exception);
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        (void) DrawPrimitive(image,clone_info,primitive_info,exception);
      }
    }
  /* Blue rectangle around the overall bounds. */
  (void) QueryColorCompliance("blue",AllCompliance,&clone_info->stroke,
    exception);
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive; (void) DrawPrimitive(image,clone_info,primitive_info,exception); clone_info=DestroyDrawInfo(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w C l i p P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawClipPath() draws the clip path on the image mask. % % The format of the DrawClipPath method is: % % MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info, % const char *name,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o name: the name of the clip path. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType DrawClipPath(Image *image, const DrawInfo *draw_info,const char *name,ExceptionInfo *exception) { char filename[MagickPathExtent]; Image *clip_mask; const char *value; DrawInfo *clone_info; MagickStatusType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); (void) FormatLocaleString(filename,MagickPathExtent,"%s",name); value=GetImageArtifact(image,filename); if (value == (const char *) NULL) return(MagickFalse); clip_mask=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (clip_mask == (Image *) NULL) return(MagickFalse); (void) QueryColorCompliance("#0000",AllCompliance, &clip_mask->background_color,exception); clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha; (void) SetImageBackgroundColor(clip_mask,exception); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s", draw_info->clip_mask); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) 
CloneString(&clone_info->primitive,value); (void) QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill, exception); clone_info->clip_mask=(char *) NULL; status=NegateImage(clip_mask,MagickFalse,exception); (void) SetImageMask(image,ReadPixelMask,clip_mask,exception); clip_mask=DestroyImage(clip_mask); status&=DrawImage(image,clone_info,exception); clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w D a s h P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the % image while respecting the dash offset and dash pattern attributes. % % The format of the DrawDashPolygon method is: % % MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info, % const PrimitiveInfo *primitive_info,Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info, const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception) { DrawInfo *clone_info; double length, maximum_length, offset, scale, total_length; MagickStatusType status; PrimitiveInfo *dash_polygon; register ssize_t i; register double dx, dy; size_t number_vertices; ssize_t j, n; assert(draw_info != (const DrawInfo *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash"); for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ; number_vertices=(size_t) i; dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (2UL*number_vertices+1UL),sizeof(*dash_polygon)); if (dash_polygon == (PrimitiveInfo *) NULL) return(MagickFalse); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->miterlimit=0; dash_polygon[0]=primitive_info[0]; scale=ExpandAffine(&draw_info->affine); length=scale*(draw_info->dash_pattern[0]-0.5); offset=fabs(draw_info->dash_offset) >= DrawEpsilon ? scale*draw_info->dash_offset : 0.0; j=1; for (n=0; offset > 0.0; j=0) { if (draw_info->dash_pattern[n] <= 0.0) break; length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5)); if (offset > length) { offset-=length; n++; length=scale*(draw_info->dash_pattern[n]+0.5); continue; } if (offset < length) { length-=offset; offset=0.0; break; } offset=0.0; n++; } status=MagickTrue; maximum_length=0.0; total_length=0.0; for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++) { dx=primitive_info[i].point.x-primitive_info[i-1].point.x; dy=primitive_info[i].point.y-primitive_info[i-1].point.y; maximum_length=hypot((double) dx,dy); if (fabs(length) < DrawEpsilon) { n++; if (fabs(draw_info->dash_pattern[n]) < DrawEpsilon) n=0; length=scale*(draw_info->dash_pattern[n]+(n == 0 ? 
-0.5 : 0.5)); } for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); ) { total_length+=length; if ((n & 0x01) != 0) { dash_polygon[0]=primitive_info[0]; dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx* total_length/maximum_length); dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy* total_length/maximum_length); j=1; } else { if ((j+1) > (ssize_t) (2*number_vertices)) break; dash_polygon[j]=primitive_info[i-1]; dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx* total_length/maximum_length); dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy* total_length/maximum_length); dash_polygon[j].coordinates=1; j++; dash_polygon[0].coordinates=(size_t) j; dash_polygon[j].primitive=UndefinedPrimitive; status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception); } n++; if (fabs(draw_info->dash_pattern[n]) < DrawEpsilon) n=0; length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5)); } length-=(maximum_length-total_length); if ((n & 0x01) != 0) continue; dash_polygon[j]=primitive_info[i]; dash_polygon[j].coordinates=1; j++; } if ((total_length <= maximum_length) && ((n & 0x01) == 0) && (j > 1)) { dash_polygon[j]=primitive_info[i-1]; dash_polygon[j].point.x+=DrawEpsilon; dash_polygon[j].point.y+=DrawEpsilon; dash_polygon[j].coordinates=1; j++; dash_polygon[0].coordinates=(size_t) j; dash_polygon[j].primitive=UndefinedPrimitive; status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception); } dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon); clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash"); return(status != 0 ? 
MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawImage() draws a graphic primitive on your image. The primitive % may be represented as a string or filename. Precede the filename with an % "at" sign (@) and the contents of the file are drawn on the image. You % can affect how text is drawn by setting one or more members of the draw % info structure. % % The format of the DrawImage method is: % % MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType IsPoint(const char *point) { char *p; double value; value=StringToDouble(point,&p); return((fabs(value) < DrawEpsilon) && (p == point) ? 
MagickFalse : MagickTrue); } static inline void TracePoint(PrimitiveInfo *primitive_info, const PointInfo point) { primitive_info->coordinates=1; primitive_info->point=point; } MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info, ExceptionInfo *exception) { #define RenderImageTag "Render/Image" AffineMatrix affine, current; char keyword[MagickPathExtent], geometry[MagickPathExtent], *next_token, pattern[MagickPathExtent], *primitive, *token; const char *q; double angle, factor, points_extent, primitive_extent; DrawInfo **graphic_context; MagickBooleanType proceed; MagickSizeType number_points; MagickStatusType status; PointInfo point; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; register const char *p; register ssize_t i, x; SegmentInfo bounds; size_t extent, number_stops; ssize_t j, k, n; StopInfo *stops; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if ((draw_info->primitive == (char *) NULL) || (*draw_info->primitive == '\0')) return(MagickFalse); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image"); if (*draw_info->primitive != '@') primitive=AcquireString(draw_info->primitive); else primitive=FileToString(draw_info->primitive+1,~0UL,exception); if (primitive == (char *) NULL) return(MagickFalse); primitive_extent=(double) strlen(primitive); (void) SetImageArtifact(image,"MVG",primitive); n=0; number_stops=0; stops=(StopInfo *) NULL; /* Allocate primitive info memory. 
*/ graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive=DestroyString(primitive); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } number_points=6553; primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info); graphic_context[n]->viewbox=image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width=image->columns; graphic_context[n]->viewbox.height=image->rows; } token=AcquireString(primitive); extent=strlen(token)+MagickPathExtent; if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; for (q=primitive; *q != '\0'; ) { /* Interpret graphic primitive. */ GetNextToken(q,&q,MagickPathExtent,keyword); if (*keyword == '\0') break; if (*keyword == '#') { /* Comment. 
*/ while ((*q != '\n') && (*q != '\0')) q++; continue; } p=q-strlen(keyword)-1; primitive_type=UndefinedPrimitive; current=graphic_context[n]->affine; GetAffineMatrix(&affine); switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.rx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ry=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("alpha",keyword) == 0) { primitive_type=AlphaPrimitive; break; } if (LocaleCompare("arc",keyword) == 0) { primitive_type=ArcPrimitive; break; } status=MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier",keyword) == 0) { primitive_type=BezierPrimitive; break; } if (LocaleCompare("border-color",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) QueryColorCompliance(token,AllCompliance, &graphic_context[n]->border_color,exception); break; } status=MagickFalse; break; } case 'c': case 'C': { if (LocaleCompare("clip-path",keyword) == 0) { /* Create clip mask. 
*/ GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->clip_mask,token); (void) DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); break; } if (LocaleCompare("clip-rule",keyword) == 0) { ssize_t fill_rule; GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) status=MagickFalse; else graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("clip-units",keyword) == 0) { ssize_t clip_units; GetNextToken(q,&q,extent,token); clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse, token); if (clip_units == -1) { status=MagickFalse; break; } graphic_context[n]->clip_units=(ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx=draw_info->bounds.x2; affine.sy=draw_info->bounds.y2; affine.tx=draw_info->bounds.x1; affine.ty=draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle",keyword) == 0) { primitive_type=CirclePrimitive; break; } if (LocaleCompare("color",keyword) == 0) { primitive_type=ColorPrimitive; break; } status=MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate",keyword) == 0) { ssize_t decorate; GetNextToken(q,&q,extent,token); decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse, token); if (decorate == -1) status=MagickFalse; else graphic_context[n]->decorate=(DecorationType) decorate; break; } if (LocaleCompare("density",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->density,token); break; } if (LocaleCompare("direction",keyword) == 0) { ssize_t direction; GetNextToken(q,&q,extent,token); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, token); if (direction == -1) status=MagickFalse; else graphic_context[n]->direction=(DirectionType) direction; break; } status=MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse",keyword) == 0) { 
primitive_type=EllipsePrimitive; break; } if (LocaleCompare("encoding",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->encoding,token); break; } status=MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->fill_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->fill,exception); if (graphic_context[n]->fill_alpha != OpaqueAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; if (status == MagickFalse) { ImageInfo *pattern_info; pattern_info=AcquireImageInfo(); (void) CopyMagickString(pattern_info->filename,token, MagickPathExtent); graphic_context[n]->fill_pattern=ReadImage(pattern_info, exception); CatchException(exception); pattern_info=DestroyImageInfo(pattern_info); } } break; } if (LocaleCompare("fill-opacity",keyword) == 0) { GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; graphic_context[n]->fill.alpha=(MagickRealType) (QuantumRange- ClampToQuantum((MagickRealType) QuantumRange*(1.0-factor* StringToDouble(token,&next_token)))); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("fill-rule",keyword) == 0) { ssize_t fill_rule; GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) status=MagickFalse; else graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("font",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->font,token); if (LocaleCompare("none",token) == 0) graphic_context[n]->font=(char *) RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->family,token); break; } if (LocaleCompare("font-size",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->pointsize=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("font-stretch",keyword) == 0) { ssize_t stretch; GetNextToken(q,&q,extent,token); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token); if (stretch == -1) status=MagickFalse; else graphic_context[n]->stretch=(StretchType) stretch; break; } if (LocaleCompare("font-style",keyword) == 0) { ssize_t style; GetNextToken(q,&q,extent,token); style=ParseCommandOption(MagickStyleOptions,MagickFalse,token); if (style == -1) status=MagickFalse; else graphic_context[n]->style=(StyleType) style; break; } if (LocaleCompare("font-weight",keyword) == 0) { ssize_t weight; GetNextToken(q,&q,extent,token); weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight=(size_t) weight; break; } status=MagickFalse; break; } case 'g': case 'G': { if 
(LocaleCompare("gradient-units",keyword) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gravity",keyword) == 0) { ssize_t gravity; GetNextToken(q,&q,extent,token); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token); if (gravity == -1) status=MagickFalse; else graphic_context[n]->gravity=(GravityType) gravity; break; } status=MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image",keyword) == 0) { ssize_t compose; primitive_type=ImagePrimitive; GetNextToken(q,&q,extent,token); compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token); if (compose == -1) status=MagickFalse; else graphic_context[n]->compose=(CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interline_spacing=StringToDouble(token, &next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("interword-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=StringToDouble(token, &next_token); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->kerning=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 'l': case 'L': { if (LocaleCompare("line",keyword) == 0) primitive_type=LinePrimitive; else status=MagickFalse; break; } case 'o': case 'O': { if (LocaleCompare("offset",keyword) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("opacity",keyword) == 0) { GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; graphic_context[n]->alpha=(Quantum) (QuantumRange*(1.0- (QuantumScale*graphic_context[n]->alpha*(1.0-factor* StringToDouble(token,&next_token))))); graphic_context[n]->fill_alpha=QuantumRange*(1.0-(QuantumScale* graphic_context[n]->fill_alpha*(1.0-factor*StringToDouble(token, &next_token)))); graphic_context[n]->stroke_alpha=QuantumRange*(1.0-(QuantumScale* graphic_context[n]->stroke_alpha*(1.0-factor*StringToDouble(token, &next_token)))); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path",keyword) == 0) { primitive_type=PathPrimitive; break; } if (LocaleCompare("point",keyword) == 0) { primitive_type=PointPrimitive; break; } if (LocaleCompare("polyline",keyword) == 0) { primitive_type=PolylinePrimitive; break; } if (LocaleCompare("polygon",keyword) == 0) { primitive_type=PolygonPrimitive; break; } if (LocaleCompare("pop",keyword) == 0) { GetNextToken(q,&q,extent,token); if (LocaleCompare("clip-path",token) == 0) break; if (LocaleCompare("defs",token) == 0) break; if (LocaleCompare("gradient",token) == 0) break; if (LocaleCompare("graphic-context",token) == 0) { if (n <= 0) { (void) ThrowMagickException(exception,GetMagickModule(), DrawError,"UnbalancedGraphicContextPushPop","`%s'",token); status=MagickFalse; n=0; break; } if (graphic_context[n]->clip_mask != (char *) NULL) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0) (void) SetImageMask(image,ReadPixelMask,(Image *) NULL, exception); graphic_context[n]=DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("pattern",token) == 0) break; status=MagickFalse; break; } if (LocaleCompare("push",keyword) == 0) { GetNextToken(q,&q,extent,token); if (LocaleCompare("clip-path",token) == 0) { char name[MagickPathExtent]; GetNextToken(q,&q,extent,token); (void) FormatLocaleString(name,MagickPathExtent,"%s",token); for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if 
(LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"clip-path") != 0) continue; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) SetImageArtifact(image,name,token); GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gradient",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent], type[MagickPathExtent]; SegmentInfo segment; GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); GetNextToken(q,&q,extent,token); (void) CopyMagickString(type,token,MagickPathExtent); GetNextToken(q,&q,extent,token); segment.x1=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.y1=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.x2=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.y2=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; if (LocaleCompare(type,"radial") == 0) { GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); } for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"gradient") != 0) continue; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); bounds.x1=graphic_context[n]->affine.sx*segment.x1+ graphic_context[n]->affine.ry*segment.y1+ graphic_context[n]->affine.tx; bounds.y1=graphic_context[n]->affine.rx*segment.x1+ graphic_context[n]->affine.sy*segment.y1+ graphic_context[n]->affine.ty; bounds.x2=graphic_context[n]->affine.sx*segment.x2+ 
graphic_context[n]->affine.ry*segment.y2+ graphic_context[n]->affine.tx; bounds.y2=graphic_context[n]->affine.rx*segment.x2+ graphic_context[n]->affine.sy*segment.y2+ graphic_context[n]->affine.ty; (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-type",name); (void) SetImageArtifact(image,key,type); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0), MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0), bounds.x1,bounds.y1); (void) SetImageArtifact(image,key,geometry); GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("pattern",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent]; RectangleInfo pattern_bounds; GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); GetNextToken(q,&q,extent,token); pattern_bounds.x=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); pattern_bounds.y=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); pattern_bounds.width=(size_t) floor(StringToDouble(token, &next_token)+0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); pattern_bounds.height=(size_t) floor(StringToDouble(token, &next_token)+0.5); if (token == next_token) status=MagickFalse; for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"pattern") != 0) continue; break; } (void) CopyMagickString(token,p,(size_t) 
(q-p-4+1)); (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double)pattern_bounds.width, (double)pattern_bounds.height,(double)pattern_bounds.x, (double)pattern_bounds.y); (void) SetImageArtifact(image,key,geometry); GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("graphic-context",token) == 0) { n++; graphic_context=(DrawInfo **) ResizeQuantumMemory( graphic_context,(size_t) (n+1),sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL, graphic_context[n-1]); break; } if (LocaleCompare("defs",token) == 0) break; status=MagickFalse; break; } status=MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle",keyword) == 0) { primitive_type=RectanglePrimitive; break; } if (LocaleCompare("rotate",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0))); affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0))); affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0))); break; } if (LocaleCompare("roundRectangle",keyword) == 0) { primitive_type=RoundRectanglePrimitive; break; } status=MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) 
status=MagickFalse; break; } if (LocaleCompare("skewX",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; affine.ry=sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; affine.rx=(-tan(DegreesToRadians(angle)/2.0)); break; } if (LocaleCompare("stop-color",keyword) == 0) { PixelInfo stop_color; number_stops++; if (number_stops == 1) stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops)); else if (number_stops > 2) stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops, sizeof(*stops)); if (stops == (StopInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } GetNextToken(q,&q,extent,token); (void) QueryColorCompliance(token,AllCompliance,&stop_color, exception); stops[number_stops-1].color=stop_color; GetNextToken(q,&q,extent,token); stops[number_stops-1].offset=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("stroke",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->stroke_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->stroke,exception); if (graphic_context[n]->stroke_alpha != OpaqueAlpha) graphic_context[n]->stroke.alpha= graphic_context[n]->stroke_alpha; if (status == MagickFalse) { ImageInfo *pattern_info; pattern_info=AcquireImageInfo(); (void) CopyMagickString(pattern_info->filename,token, MagickPathExtent); graphic_context[n]->stroke_pattern=ReadImage(pattern_info, exception); CatchException(exception); 
pattern_info=DestroyImageInfo(pattern_info); } } break; } if (LocaleCompare("stroke-antialias",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_antialias= StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray",keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *) NULL) graphic_context[n]->dash_pattern=(double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *r; r=q; GetNextToken(r,&r,extent,token); if (*token == ',') GetNextToken(r,&r,extent,token); for (x=0; IsPoint(token) != MagickFalse; x++) { GetNextToken(r,&r,extent,token); if (*token == ',') GetNextToken(r,&r,extent,token); } graphic_context[n]->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2UL*x+2UL), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); status=MagickFalse; break; } for (j=0; j < x; j++) { GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->dash_pattern[j]=StringToDouble(token, &next_token); if (token == next_token) status=MagickFalse; if (graphic_context[n]->dash_pattern[j] < 0.0) status=MagickFalse; } if ((x & 0x01) != 0) for ( ; j < (2*x); j++) graphic_context[n]->dash_pattern[j]= graphic_context[n]->dash_pattern[j-x]; graphic_context[n]->dash_pattern[j]=0.0; break; } GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke-dashoffset",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->dash_offset=StringToDouble(token, &next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("stroke-linecap",keyword) == 0) { ssize_t linecap; GetNextToken(q,&q,extent,token); linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token); if (linecap == -1) 
status=MagickFalse; else graphic_context[n]->linecap=(LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin",keyword) == 0) { ssize_t linejoin; GetNextToken(q,&q,extent,token); linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse, token); if (linejoin == -1) status=MagickFalse; else graphic_context[n]->linejoin=(LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->miterlimit=StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity",keyword) == 0) { GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; graphic_context[n]->stroke.alpha=(MagickRealType) (QuantumRange- ClampToQuantum((MagickRealType) QuantumRange*(1.0-factor* StringToDouble(token,&next_token)))); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("stroke-width",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_width=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text",keyword) == 0) { primitive_type=TextPrimitive; break; } if (LocaleCompare("text-align",keyword) == 0) { ssize_t align; GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) status=MagickFalse; else graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-anchor",keyword) == 0) { ssize_t align; GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) status=MagickFalse; else graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-antialias",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->text_antialias=StringToLong(token) != 0 ? 
MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) QueryColorCompliance(token,AllCompliance, &graphic_context[n]->undercolor,exception); break; } if (LocaleCompare("translate",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 'v': case 'V': { if (LocaleCompare("viewbox",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } default: { status=MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx-1.0) >= DrawEpsilon) || (fabs(affine.rx) >= DrawEpsilon) || (fabs(affine.ry) >= DrawEpsilon) || (fabs(affine.sy-1.0) >= DrawEpsilon) || (fabs(affine.tx) >= DrawEpsilon) || (fabs(affine.ty) >= DrawEpsilon)) { graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx; graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx; 
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy; graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy; graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+ current.tx; graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+ current.ty; } if (primitive_type == UndefinedPrimitive) { if (*q == '\0') { if (number_stops > 1) { GradientType type; type=LinearGradient; if (draw_info->gradient.type == RadialGradient) type=RadialGradient; (void) GradientImage(image,type,PadSpread,stops,number_stops, exception); } if (number_stops > 0) stops=(StopInfo *) RelinquishMagickMemory(stops); } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p),p); continue; } /* Parse the primitive attributes. */ i=0; j=0; primitive_info[0].point.x=0.0; primitive_info[0].point.y=0.0; for (x=0; *q != '\0'; x++) { /* Define points. */ if (IsPoint(q) == MagickFalse) break; GetNextToken(q,&q,extent,token); point.x=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); point.y=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,(const char **) NULL,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); primitive_info[i].primitive=primitive_type; primitive_info[i].point=point; primitive_info[i].coordinates=0; primitive_info[i].method=FloodfillMethod; i++; if (i < (ssize_t) number_points) continue; number_points<<=1; primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info, (size_t) number_points,sizeof(*primitive_info)); if ((primitive_info == (PrimitiveInfo *) NULL) || (number_points != (MagickSizeType) ((size_t) number_points))) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } primitive_info[j].primitive=primitive_type; primitive_info[j].coordinates=(size_t) x; 
primitive_info[j].method=FloodfillMethod; primitive_info[j].text=(char *) NULL; /* Circumscribe primitive within a circle. */ bounds.x1=primitive_info[j].point.x; bounds.y1=primitive_info[j].point.y; bounds.x2=primitive_info[j].point.x; bounds.y2=primitive_info[j].point.y; for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++) { point=primitive_info[j+k].point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.y < bounds.y1) bounds.y1=point.y; if (point.x > bounds.x2) bounds.x2=point.x; if (point.y > bounds.y2) bounds.y2=point.y; } /* Speculate how many points our primitive might consume. */ points_extent=(double) primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { points_extent*=5; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot((double) alpha,(double) beta); points_extent*=5; points_extent+=2*ceil((double) MagickPI*radius)+6*BezierQuantum+360; break; } case BezierPrimitive: { if (primitive_info[j].coordinates > 107) (void) ThrowMagickException(exception,GetMagickModule(),DrawError, "TooManyBezierCoordinates","`%s'",token); points_extent=(double) (BezierQuantum*primitive_info[j].coordinates); break; } case PathPrimitive: { char *s, *t; GetNextToken(q,&q,extent,token); points_extent=1; t=token; for (s=token; *s != '\0'; s=t) { double value; value=StringToDouble(s,&t); (void) value; if (s == t) { t++; continue; } points_extent++; } points_extent=points_extent*BezierQuantum; break; } case CirclePrimitive: case ArcPrimitive: case EllipsePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot((double) alpha,(double) beta); points_extent=2*ceil((double) MagickPI*radius)+6*BezierQuantum+360; break; } default: break; } if (((double) ((size_t) points_extent)) < points_extent) { (void) ThrowMagickException(exception,GetMagickModule(), 
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); break; } if (((MagickSizeType) (i+points_extent)) >= number_points) { /* Resize based on speculative points required by primitive. */ number_points+=points_extent+1; primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info, (size_t) number_points,sizeof(*primitive_info)); if ((primitive_info == (PrimitiveInfo *) NULL) || (number_points != (MagickSizeType) ((size_t) number_points))) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } } switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } TracePoint(primitive_info+j,primitive_info[j].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case LinePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } TraceLine(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } TraceRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } TraceRoundRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case ArcPrimitive: { if (primitive_info[j].coordinates != 3) { primitive_type=UndefinedPrimitive; break; } TraceArc(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } 
TraceEllipse(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } TraceCircle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PolylinePrimitive: break; case PolygonPrimitive: { primitive_info[i]=primitive_info[j]; primitive_info[i].coordinates=0; primitive_info[j].coordinates++; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } TraceBezier(primitive_info+j,primitive_info[j].coordinates); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PathPrimitive: { i=(ssize_t) (j+TracePath(primitive_info+j,token)); break; } case AlphaPrimitive: case ColorPrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); method=ParseCommandOption(MagickMethodOptions,MagickFalse,token); if (method == -1) status=MagickFalse; else primitive_info[j].method=(PaintMethod) method; break; } case TextPrimitive: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } if (*token != ',') GetNextToken(q,&q,extent,token); primitive_info[j].text=AcquireString(token); break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); primitive_info[j].text=AcquireString(token); break; } } if (primitive_info == (PrimitiveInfo *) NULL) break; if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p),p); if (status == MagickFalse) break; primitive_info[i].primitive=UndefinedPrimitive; if (i == 0) continue; /* Transform points. 
*/ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+ graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx; primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+ graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty; point=primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1=point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1=point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2=point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2=point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (graphic_context[n]->clip_mask != (char *) NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0)) status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); status&=DrawPrimitive(image,graphic_context[n],primitive_info, exception); } if (primitive_info->text != (char *) NULL) primitive_info->text=(char *) RelinquishMagickMemory( primitive_info->text); proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image"); /* Relinquish resources. 
*/ token=DestroyString(token); if (primitive_info != (PrimitiveInfo *) NULL) primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info); primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); if (status == MagickFalse) ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition", keyword); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w G r a d i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawGradientImage() draws a linear gradient on the image. % % The format of the DrawGradientImage method is: % % MagickBooleanType DrawGradientImage(Image *image, % const DrawInfo *draw_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o exception: return any errors or warnings in this structure. 
% */ static inline double GetStopColorOffset(const GradientInfo *gradient, const ssize_t x,const ssize_t y) { switch (gradient->type) { case UndefinedGradient: case LinearGradient: { double gamma, length, offset, scale; PointInfo p, q; const SegmentInfo *gradient_vector; gradient_vector=(&gradient->gradient_vector); p.x=gradient_vector->x2-gradient_vector->x1; p.y=gradient_vector->y2-gradient_vector->y1; q.x=(double) x-gradient_vector->x1; q.y=(double) y-gradient_vector->y1; length=sqrt(q.x*q.x+q.y*q.y); gamma=sqrt(p.x*p.x+p.y*p.y)*length; gamma=PerceptibleReciprocal(gamma); scale=p.x*q.x+p.y*q.y; offset=gamma*scale*length; return(offset); } case RadialGradient: { PointInfo v; if (gradient->spread == RepeatSpread) { v.x=(double) x-gradient->center.x; v.y=(double) y-gradient->center.y; return(sqrt(v.x*v.x+v.y*v.y)); } v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians( gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians( gradient->angle))))/gradient->radii.x; v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians( gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians( gradient->angle))))/gradient->radii.y; return(sqrt(v.x*v.x+v.y*v.y)); } } return(0.0); } static int StopInfoCompare(const void *x,const void *y) { StopInfo *stop_1, *stop_2; stop_1=(StopInfo *) x; stop_2=(StopInfo *) y; if (stop_1->offset > stop_2->offset) return(1); if (fabs(stop_1->offset-stop_2->offset) <= DrawEpsilon) return(0); return(-1); } MagickExport MagickBooleanType DrawGradientImage(Image *image, const DrawInfo *draw_info,ExceptionInfo *exception) { CacheView *image_view; const GradientInfo *gradient; const SegmentInfo *gradient_vector; double length; MagickBooleanType status; PixelInfo zero; PointInfo point; RectangleInfo bounding_box; ssize_t y; /* Draw linear or radial gradient on image. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); gradient=(&draw_info->gradient); qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo), StopInfoCompare); gradient_vector=(&gradient->gradient_vector); point.x=gradient_vector->x2-gradient_vector->x1; point.y=gradient_vector->y2-gradient_vector->y1; length=sqrt(point.x*point.x+point.y*point.y); bounding_box=gradient->bounding_box; status=MagickTrue; GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,bounding_box.height-bounding_box.y,1) #endif for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++) { PixelInfo composite, pixel; double alpha, offset; register Quantum *magick_restrict q; register ssize_t i, x; ssize_t j; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; composite=zero; offset=GetStopColorOffset(gradient,0,y); if (gradient->type != RadialGradient) offset/=length; for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++) { GetPixelInfoPixel(image,q,&pixel); switch (gradient->spread) { case UndefinedSpread: case PadSpread: { if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) || (y != (ssize_t) ceil(gradient_vector->y1-0.5))) { offset=GetStopColorOffset(gradient,x,y); if (gradient->type != RadialGradient) offset/=length; } for (i=0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if ((offset < 0.0) || (i == 0)) composite=gradient->stops[0].color; else if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops)) 
composite=gradient->stops[gradient->number_stops-1].color; else { j=i; i--; alpha=(offset-gradient->stops[i].offset)/ (gradient->stops[j].offset-gradient->stops[i].offset); CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha, &gradient->stops[j].color,alpha,&composite); } break; } case ReflectSpread: { if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) || (y != (ssize_t) ceil(gradient_vector->y1-0.5))) { offset=GetStopColorOffset(gradient,x,y); if (gradient->type != RadialGradient) offset/=length; } if (offset < 0.0) offset=(-offset); if ((ssize_t) fmod(offset,2.0) == 0) offset=fmod(offset,1.0); else offset=1.0-fmod(offset,1.0); for (i=0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if (i == 0) composite=gradient->stops[0].color; else if (i == (ssize_t) gradient->number_stops) composite=gradient->stops[gradient->number_stops-1].color; else { j=i; i--; alpha=(offset-gradient->stops[i].offset)/ (gradient->stops[j].offset-gradient->stops[i].offset); CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha, &gradient->stops[j].color,alpha,&composite); } break; } case RepeatSpread: { MagickBooleanType antialias; double repeat; antialias=MagickFalse; repeat=0.0; if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) || (y != (ssize_t) ceil(gradient_vector->y1-0.5))) { offset=GetStopColorOffset(gradient,x,y); if (gradient->type == LinearGradient) { repeat=fmod(offset,length); if (repeat < 0.0) repeat=length-fmod(-repeat,length); else repeat=fmod(offset,length); antialias=(repeat < length) && ((repeat+1.0) > length) ? MagickTrue : MagickFalse; offset=repeat/length; } else { repeat=fmod(offset,gradient->radius); if (repeat < 0.0) repeat=gradient->radius-fmod(-repeat,gradient->radius); else repeat=fmod(offset,gradient->radius); antialias=repeat+1.0 > gradient->radius ? 
MagickTrue : MagickFalse; offset=repeat/gradient->radius; } } for (i=0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if (i == 0) composite=gradient->stops[0].color; else if (i == (ssize_t) gradient->number_stops) composite=gradient->stops[gradient->number_stops-1].color; else { j=i; i--; alpha=(offset-gradient->stops[i].offset)/ (gradient->stops[j].offset-gradient->stops[i].offset); if (antialias != MagickFalse) { if (gradient->type == LinearGradient) alpha=length-repeat; else alpha=gradient->radius-repeat; i=0; j=(ssize_t) gradient->number_stops-1L; } CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha, &gradient->stops[j].color,alpha,&composite); } break; } } CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha, &pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P a t t e r n P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPatternPath() draws a pattern. % % The format of the DrawPatternPath method is: % % MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info, % const char *name,Image **pattern,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o name: the pattern name. % % o image: the image. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    property[MagickPathExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  /*
    The pattern definition and its geometry are stored as image artifacts
    keyed by the pattern name ("<name>" and "<name>-geometry").
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%s",name);
  path=GetImageArtifact(image,property);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,property);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  /*
    Replace any previous pattern image with a fresh transparent canvas of
    the requested geometry.
  */
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  (void) QueryColorCompliance("#000000ff",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  /*
    Render the stored primitive string onto the pattern canvas with a clone
    of the caller's draw settings (patterns cleared to avoid recursion).
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill_pattern=NewImageList();
  clone_info->stroke_pattern=NewImageList();
  (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
  type=GetImageArtifact(image,property);
  if (type != (const char *) NULL)
    clone_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&clone_info->primitive,path);
  status=DrawImage(*pattern,clone_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w P o l y g o n P r i m i t i v e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPolygonPrimitive() draws a polygon on the image.
%
%  The format of the DrawPolygonPrimitive method is:
%
%      MagickBooleanType DrawPolygonPrimitive(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  DestroyPolygonThreadSet() releases every per-thread PolygonInfo in the set
  (sized by the thread resource limit) and then the set itself.  Returns
  NULL so callers can write: set=DestroyPolygonThreadSet(set).
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  register ssize_t
    i;

  assert(polygon_info != (PolygonInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (polygon_info[i] != (PolygonInfo *) NULL)
      polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
  polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
  return(polygon_info);
}

/*
  AcquirePolygonThreadSet() converts the primitive path into one PolygonInfo
  per potential worker thread, so each OpenMP thread in
  DrawPolygonPrimitive() can mutate its own copy (scanline/highwater caches)
  without locking.  Returns NULL on allocation or conversion failure.
*/
static PolygonInfo **AcquirePolygonThreadSet(
  const PrimitiveInfo *primitive_info)
{
  PathInfo
    *magick_restrict path_info;

  PolygonInfo
    **polygon_info;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo **) NULL)
    return((PolygonInfo **) NULL);
  (void) ResetMagickMemory(polygon_info,0,number_threads*sizeof(*polygon_info));
  path_info=ConvertPrimitiveToPath(primitive_info);
  if (path_info == (PathInfo *) NULL)
    return(DestroyPolygonThreadSet(polygon_info));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    polygon_info[i]=ConvertPathToPolygon(path_info);
    if (polygon_info[i] == (PolygonInfo *) NULL)
      return(DestroyPolygonThreadSet(polygon_info));
  }
  path_info=(PathInfo *) RelinquishMagickMemory(path_info);
  return(polygon_info);
}

/*
  GetFillAlpha() returns the fill opacity (0..1) of point (x,y) relative to
  the polygon's edges and stores the stroke opacity in *stroke_alpha.  `mid'
  is half the (affine-expanded) stroke width.  Edges are scanned in y-sorted
  order; edges wholly above the scanline are destroyed as they can never
  match again (rows are processed in increasing y per thread).  NOTE: this
  mutates polygon_info (scanline/highwater caches), hence the per-thread
  copies from AcquirePolygonThreadSet().
*/
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_alpha)
{
  double
    alpha,
    beta,
    distance,
    subpath_alpha;

  PointInfo
    delta;

  register const PointInfo
    *q;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_alpha=0.0;
  subpath_alpha=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /*
      Edges are y-sorted: once an edge starts below the scanline (plus the
      stroke margin) no later edge can intersect it either.
    */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        (void) DestroyEdge(polygon_info,(size_t) j);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    /*
      highwater caches the first segment index that can still intersect the
      current scanline, so later pixels on the row skip earlier segments.
    */
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta < 0.0)
        {
          /* nearest point is the segment's first endpoint */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta > alpha)
            {
              /* nearest point is the segment's second endpoint */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /* perpendicular (squared) distance to the segment interior */
              alpha=1.0/alpha;
              beta=delta.x*(y-q->y)-delta.y*(x-q->x);
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;
              else
                {
                  /* partial coverage: fade with distance past the stroke */
                  beta=1.0;
                  if (fabs(distance-1.0) >= DrawEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;
          continue;
        }
      if (distance > 1.0)
        continue;
      if (fabs(beta) < DrawEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= DrawEpsilon)
            beta=sqrt(distance);
        }
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* cross product decides which side of the edge (x,y) lies on */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  /* even-odd rule vs. non-zero winding rule */
  if (fill_rule != NonZeroRule)
    {
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_alpha);
}

static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    fill,
    status;

  double
    mid;

  PolygonInfo
    **magick_restrict polygon_info;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start_y,
    stop_y,
    y;

  /*
    Compute bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  if (primitive_info->coordinates == 0)
    return(MagickTrue);
  polygon_info=AcquirePolygonThreadSet(primitive_info);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
DisableMSCWarning(4127)
  /* debugging hook: flip to if (1) to visualize edge bounding boxes */
  if (0)
    DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
RestoreMSCWarning
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin draw-polygon");
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
  /* mid = half stroke width after affine scaling */
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  bounds=polygon_info[0]->edges[0].bounds;
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  /*
    Expand the box by the stroke margin, then clamp to the image frame.
  */
  bounds.x1-=(mid+1.0);
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : (size_t) ceil(bounds.x1-0.5) >=
    image->columns ? (double) image->columns-1 : bounds.x1;
  bounds.y1-=(mid+1.0);
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : (size_t) ceil(bounds.y1-0.5) >=
    image->rows ? (double) image->rows-1 : bounds.y1;
  bounds.x2+=(mid+1.0);
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : (size_t) floor(bounds.x2+0.5) >=
    image->columns ? (double) image->columns-1 : bounds.x2;
  bounds.y2+=(mid+1.0);
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : (size_t) floor(bounds.y2+0.5) >=
    image->rows ? (double) image->rows-1 : bounds.y2;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
      start_y=(ssize_t) ceil(bounds.y1-0.5);
      stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        ssize_t
          start_x,
          stop_x;

        if (status == MagickFalse)
          continue;
        start_x=(ssize_t) ceil(bounds.x1-0.5);
        stop_x=(ssize_t) floor(bounds.x2+0.5);
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        GetPixelInfo(image,&pixel);
        for ( ; x <= stop_x; x++)
        {
          /* only the single pixel nearest the point is painted */
          if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
              (y == (ssize_t) ceil(primitive_info->point.y-0.5)))
            {
              GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
            }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          "    end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  start_y=(ssize_t) ceil(bounds.y1-0.5);
  stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      fill_alpha,
      stroke_alpha;

    PixelInfo
      fill_color,
      stroke_color;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      start_x,
      stop_x;

    if (status == MagickFalse)
      continue;
    start_x=(ssize_t) ceil(bounds.x1-0.5);
    stop_x=(ssize_t) floor(bounds.x2+0.5);
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      /*
        Fill and/or stroke.
      */
      /* each thread uses its own polygon_info copy (GetFillAlpha mutates) */
      fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
        x,y,&stroke_alpha);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* hard threshold when antialiasing is disabled */
          fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0;
          stroke_alpha=stroke_alpha > 0.25 ? 1.0 : 0.0;
        }
      GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
      fill_alpha=fill_alpha*fill_color.alpha;
      CompositePixelOver(image,&fill_color,fill_alpha,q,(double)
        GetPixelAlpha(image,q),q);
      GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
      stroke_alpha=stroke_alpha*stroke_color.alpha;
      CompositePixelOver(image,&stroke_color,stroke_alpha,q,(double)
        GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end draw-polygon");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P r i m i t i v e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
%  The format of the DrawPrimitive method is:
%
%      MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
%        PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
}; PointInfo p, q, point; register ssize_t i, x; ssize_t coordinates, y; x=(ssize_t) ceil(primitive_info->point.x-0.5); y=(ssize_t) ceil(primitive_info->point.y-0.5); switch (primitive_info->primitive) { case AlphaPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ColorPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ImagePrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ImagePrimitive %.20g,%.20g",(double) x,(double) y); return; } case PointPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "PointPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case TextPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "TextPrimitive %.20g,%.20g",(double) x,(double) y); return; } default: break; } coordinates=0; p=primitive_info[0].point; q.x=(-1.0); q.y=(-1.0); for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; if (coordinates <= 0) { coordinates=(ssize_t) primitive_info[i].coordinates; (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin open (%.20g)",(double) coordinates); p=point; } point=primitive_info[i].point; if ((fabs(q.x-point.x) >= DrawEpsilon) || (fabs(q.y-point.y) >= DrawEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y); else (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y); q=point; coordinates--; if (coordinates > 0) continue; if ((fabs(p.x-point.x) >= DrawEpsilon) || (fabs(p.y-point.y) >= DrawEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)", (double) coordinates); else (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open 
(%.20g)", (double) coordinates); } } MagickExport MagickBooleanType DrawPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { CacheView *image_view; MagickStatusType status; register ssize_t i, x; ssize_t y; if (image->debug != MagickFalse) { (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-primitive"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx, draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy, draw_info->affine.tx,draw_info->affine.ty); } if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) || (IsPixelInfoGray(&draw_info->stroke) == MagickFalse))) (void) SetImageColorspace(image,sRGBColorspace,exception); status=MagickTrue; x=(ssize_t) ceil(primitive_info->point.x-0.5); y=(ssize_t) ceil(primitive_info->point.y-0.5); image_view=AcquireAuthenticCacheView(image,exception); switch (primitive_info->primitive) { case AlphaPrimitive: { if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); (void) SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { MagickBooleanType sync; PixelInfo pixel, target; (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if 
(IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { ChannelType channel_mask; PixelInfo target; (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) draw_info->border_color.red; target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } channel_mask=SetImageChannelMask(image,AlphaChannel); status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? MagickFalse : MagickTrue,exception); (void) SetImageChannelMask(image,channel_mask); break; } case ResetMethod: { MagickBooleanType sync; PixelInfo pixel; for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } } break; } case ColorPrimitive: { switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetPixelInfo(image,&pixel); GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); (void) SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { MagickBooleanType sync; PixelInfo pixel, target; (void) 
GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { PixelInfo target; (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) draw_info->border_color.red; target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? 
MagickFalse : MagickTrue,exception); break; } case ResetMethod: { MagickBooleanType sync; PixelInfo pixel; GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } } break; } case ImagePrimitive: { AffineMatrix affine; char composite_geometry[MagickPathExtent]; Image *composite_image; ImageInfo *clone_info; RectangleInfo geometry; ssize_t x1, y1; if (primitive_info->text == (char *) NULL) break; clone_info=AcquireImageInfo(); if (LocaleNCompare(primitive_info->text,"data:",5) == 0) composite_image=ReadInlineImage(clone_info,primitive_info->text, exception); else { (void) CopyMagickString(clone_info->filename,primitive_info->text, MagickPathExtent); composite_image=ReadImage(clone_info,exception); } clone_info=DestroyImageInfo(clone_info); if (composite_image == (Image *) NULL) break; (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor) NULL,(void *) NULL); x1=(ssize_t) ceil(primitive_info[1].point.x-0.5); y1=(ssize_t) ceil(primitive_info[1].point.y-0.5); if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) || ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows))) { /* Resize image. 
*/ (void) FormatLocaleString(composite_geometry,MagickPathExtent, "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y); composite_image->filter=image->filter; (void) TransformImage(&composite_image,(char *) NULL, composite_geometry,exception); } if (composite_image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel, exception); if (draw_info->alpha != OpaqueAlpha) (void) SetImageAlpha(composite_image,draw_info->alpha,exception); SetGeometry(image,&geometry); image->gravity=draw_info->gravity; geometry.x=x; geometry.y=y; (void) FormatLocaleString(composite_geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double) composite_image->rows,(double) geometry.x,(double) geometry.y); (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception); affine=draw_info->affine; affine.tx=(double) geometry.x; affine.ty=(double) geometry.y; composite_image->interpolate=image->interpolate; status&=DrawAffineImage(image,composite_image,&affine,exception); composite_image=DestroyImage(composite_image); break; } case PointPrimitive: { PixelInfo fill_color; register Quantum *q; if ((y < 0) || (y >= (ssize_t) image->rows)) break; if ((x < 0) || (x >= (ssize_t) image->columns)) break; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info,x,y,&fill_color,exception); CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q, (double) GetPixelAlpha(image,q),q); (void) SyncCacheViewAuthenticPixels(image_view,exception); break; } case TextPrimitive: { char geometry[MagickPathExtent]; DrawInfo *clone_info; if (primitive_info->text == (char *) NULL) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->text,primitive_info->text); (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); (void) 
CloneString(&clone_info->geometry,geometry); status&=AnnotateImage(image,clone_info,exception); clone_info=DestroyDrawInfo(clone_info); break; } default: { double mid, scale; DrawInfo *clone_info; if (IsEventLogging() != MagickFalse) LogPrimitiveInfo(primitive_info); scale=ExpandAffine(&draw_info->affine); if ((draw_info->dash_pattern != (double *) NULL) && (fabs(draw_info->dash_pattern[0]) >= DrawEpsilon) && (fabs(scale*draw_info->stroke_width) >= DrawEpsilon) && (draw_info->stroke.alpha != (Quantum) TransparentAlpha)) { /* Draw dash polygon. */ clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; status&=DrawPolygonPrimitive(image,clone_info,primitive_info, exception); clone_info=DestroyDrawInfo(clone_info); (void) DrawDashPolygon(draw_info,primitive_info,image,exception); break; } mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; if ((mid > 1.0) && ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) || (draw_info->stroke_pattern != (Image *) NULL))) { MagickBooleanType closed_path; /* Draw strokes while respecting line cap/join attributes. */ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ; closed_path= (fabs(primitive_info[i-1].point.x-primitive_info[0].point.x) < DrawEpsilon) && (fabs(primitive_info[i-1].point.y-primitive_info[0].point.y) < DrawEpsilon) ? 
MagickTrue : MagickFalse; i=(ssize_t) primitive_info[0].coordinates; if ((((draw_info->linecap == RoundCap) || (closed_path != MagickFalse)) && (draw_info->linejoin == RoundJoin)) || (primitive_info[i].primitive != UndefinedPrimitive)) { (void) DrawPolygonPrimitive(image,draw_info,primitive_info, exception); break; } clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; status&=DrawPolygonPrimitive(image,clone_info,primitive_info, exception); clone_info=DestroyDrawInfo(clone_info); status&=DrawStrokePolygon(image,draw_info,primitive_info,exception); break; } status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception); break; } } image_view=DestroyCacheView(image_view); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w S t r o k e P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on % the image while respecting the line cap and join attributes. % % The format of the DrawStrokePolygon method is: % % MagickBooleanType DrawStrokePolygon(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. 
% % */ static void DrawRoundLinecap(Image *image,const DrawInfo *draw_info, const PrimitiveInfo *primitive_info,ExceptionInfo *exception) { PrimitiveInfo linecap[5]; register ssize_t i; for (i=0; i < 4; i++) linecap[i]=(*primitive_info); linecap[0].coordinates=4; linecap[1].point.x+=2.0*DrawEpsilon; linecap[2].point.x+=2.0*DrawEpsilon; linecap[2].point.y+=2.0*DrawEpsilon; linecap[3].point.y+=2.0*DrawEpsilon; linecap[4].primitive=UndefinedPrimitive; (void) DrawPolygonPrimitive(image,draw_info,linecap,exception); } static MagickBooleanType DrawStrokePolygon(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { DrawInfo *clone_info; MagickBooleanType closed_path; MagickStatusType status; PrimitiveInfo *stroke_polygon; register const PrimitiveInfo *p, *q; /* Draw stroked polygon. */ if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-stroke-polygon"); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->fill=draw_info->stroke; if (clone_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern); if (clone_info->stroke_pattern != (Image *) NULL) clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0, MagickTrue,exception); clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; clone_info->stroke_width=0.0; clone_info->fill_rule=NonZeroRule; status=MagickTrue; for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates) { stroke_polygon=TraceStrokePolygon(draw_info,p); if (stroke_polygon == (PrimitiveInfo *) NULL) { status=0; break; } status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception); if (status == 0) break; stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon); q=p+p->coordinates-1; closed_path=(fabs(q->point.x-p->point.x) < DrawEpsilon) && (fabs(q->point.y-p->point.y) < DrawEpsilon) ? 
MagickTrue : MagickFalse;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        /* Open sub-path with round caps: cap both endpoints. */
        DrawRoundLinecap(image,draw_info,p,exception);
        DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A f f i n e M a t r i x                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAffineMatrix() returns an AffineMatrix initialized to the identity
%  matrix.
%
%  The format of the GetAffineMatrix method is:
%
%      void GetAffineMatrix(AffineMatrix *affine_matrix)
%
%  A description of each parameter follows:
%
%    o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  /* Zero everything, then set the diagonal: identity transform. */
  (void) ResetMagickMemory(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sx=1.0;
  affine_matrix->sy=1.0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t D r a w I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetDrawInfo() initializes draw_info to default values from image_info.
%
%  The format of the GetDrawInfo method is:
%
%      void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) ResetMagickMemory(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* Defaults: opaque black fill ("#000F"), fully transparent stroke. */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#0000",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->debug=IsEventLogging();
  /* Inherit rendering attributes from the (cloned) image info. */
  draw_info->stroke_antialias=clone_info->antialias;
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= DrawEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /* Fold in any per-image drawing options. */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* Accept a symbolic weight name, else fall back to a numeric value. */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r m u t a t e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the permutation of the (n,k).
%
%  The format of the Permutate method is:
%
%      void Permutate(ssize_t n,ssize_t k)
%
%  A description of each parameter follows:
%
%    o n: the number of items.
%
%    o k: the number of items chosen.
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    r;

  register ssize_t
    i;

  /*
    Despite the name this computes n!/(k!*(n-k)!), i.e. the binomial
    coefficient C(n,k): the first loop accumulates n!/k!, the second
    divides by (n-k)!.
  */
  r=1.0;
  for (i=k+1; i <= n; i++)
    r*=i;
  for (i=1; i <= (n-k); i++)
    r/=i;
  return(r);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a c e P r i m i t i v e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TracePrimitive is a collection of methods for generating graphic
%  primitives such as arcs, ellipses, paths, etc.
%
*/

/*
  TraceArc() renders an arc given its chord endpoints: the ellipse center is
  the chord midpoint and the radii are the center-to-start distances; the
  tracing itself is delegated to TraceEllipse().
*/
static void TraceArc(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    center,
    radii;

  center.x=0.5*(end.x+start.x);
  center.y=0.5*(end.y+start.y);
  radii.x=fabs(center.x-start.x);
  radii.y=fabs(center.y-start.y);
  TraceEllipse(primitive_info,center,radii,degrees);
}

/*
  TraceArcPath() traces an SVG-style elliptical arc from 'start' to 'end'
  with radii 'arc', x-axis rotation 'angle' (degrees) and the SVG
  large-arc/sweep flags, converting the endpoint parameterization to a
  center parameterization (presumably following the SVG arc implementation
  notes -- the remainder of the conversion lies beyond this line range).
*/
static void TraceArcPath(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  /* Degenerate chord: emit a single point. */
  if ((fabs(start.x-end.x) < DrawEpsilon) &&
      (fabs(start.y-end.y) < DrawEpsilon))
    {
      TracePoint(primitive_info,end);
      return;
    }
  /* A zero radius collapses the arc to a straight line (per SVG rules). */
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((fabs(radii.x) < DrawEpsilon) || (fabs(radii.y) < DrawEpsilon))
    {
      TraceLine(primitive_info,start,end);
      return;
    }
  /* Rotate the half-chord into the ellipse's axis-aligned frame. */
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
(radii.y*radii.y);
  if (delta < DrawEpsilon)
    {
      /* Both endpoints map to the ellipse center: draw a line instead. */
      TraceLine(primitive_info,start,end);
      return;
    }
  if (delta > 1.0)
    {
      /* Radii too small to span the chord: scale them up (SVG rule). */
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  /* Map both endpoints onto the unit circle of the scaled ellipse. */
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  /* Pick the circle center consistent with the large-arc/sweep flags. */
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /*
    Split the sweep into quarter-circle (or smaller) Bezier segments.
    NOTE(review): if theta is exactly 0 here, arc_segments is 0, the loop
    below is skipped and coordinates ends up 0 -- confirm callers tolerate
    an empty trace.
  */
  arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
    DrawEpsilon))));
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    /* gamma is the cubic-Bezier control-point distance for this segment. */
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /* Rotate/scale the control points back into user space and trace. */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;
    TraceBezier(p,4);
    p+=p->coordinates;
  }
  primitive_info->coordinates=(size_t) (p-primitive_info);
  /* Stamp the primitive type on the emitted points, walking backward. */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}

/*
  TraceBezier() expands the number_coordinates control points already stored
  in primitive_info into a polyline approximation of the Bezier curve,
  appending the generated points in place.
*/
static void TraceBezier(PrimitiveInfo *primitive_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients.  quantum scales the sample count with the curve's
    bounding extent so bigger curves get more segments.
  */
  quantum=number_coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  quantum=(size_t) MagickMin((double) quantum/number_coordinates,
    (double) BezierQuantum);
  control_points=quantum*number_coordinates;
  coefficients=(double *) AcquireQuantumMemory((size_t)
    number_coordinates,sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory((size_t) control_points,
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Compute bezier points: evaluate the Bernstein form at control_points
    evenly spaced parameter values.
  */
  end=primitive_info[number_coordinates-1].point;
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
*/
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    TracePoint(p,points[i]);
    p+=p->coordinates;
  }
  /* Force the final point to land exactly on the curve's endpoint. */
  TracePoint(p,end);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  /* Stamp the primitive type on the emitted points, walking backward. */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
}

/*
  TraceCircle() traces the circle centered at 'start' whose radius is the
  distance from 'start' to 'end', as a full 0-360 degree ellipse with equal
  radii.
*/
static void TraceCircle(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end)
{
  double
    alpha,
    beta,
    radius;

  PointInfo
    offset,
    degrees;

  alpha=end.x-start.x;
  beta=end.y-start.y;
  radius=hypot((double) alpha,(double) beta);
  offset.x=(double) radius;
  offset.y=(double) radius;
  degrees.x=0.0;
  degrees.y=360.0;
  TraceEllipse(primitive_info,start,offset,degrees);
}

/*
  TraceEllipse() traces the ellipse centered at 'start' with radii 'stop'
  over the angular range degrees.x..degrees.y (degrees), appending the
  generated points to primitive_info.
*/
static void TraceEllipse(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo stop,const PointInfo degrees)
{
  double
    delta,
    step,
    y;

  PointInfo
    angle,
    point;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Ellipses are just short segmented polys.
*/
  /* Degenerate radii: the ellipse is a single point. */
  if ((fabs(stop.x) < DrawEpsilon) && (fabs(stop.y) < DrawEpsilon))
    {
      TracePoint(primitive_info,start);
      return;
    }
  /*
    Pick an angular step that shrinks as the ellipse grows, so larger
    ellipses receive proportionally more segments.
  */
  delta=2.0/MagickMax(stop.x,stop.y);
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/(4*(MagickPI/delta/2+0.5));
  angle.x=DegreesToRadians(degrees.x);
  y=degrees.y;
  /* Normalize the stop angle so the sweep is non-negative. */
  while (y < degrees.x)
    y+=360.0;
  angle.y=DegreesToRadians(y);
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*stop.x+start.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*stop.y+start.y;
    TracePoint(p,point);
    p+=p->coordinates;
  }
  /* Close the sweep exactly at the stop angle. */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*stop.x+start.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*stop.y+start.y;
  TracePoint(p,point);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  /* Stamp the primitive type on the emitted points, walking backward. */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}

/*
  TraceLine() traces the segment start->end as two points; coincident
  endpoints (within DrawEpsilon) degrade to a single PointPrimitive.
*/
static void TraceLine(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end)
{
  TracePoint(primitive_info,start);
  if ((fabs(start.x-end.x) < DrawEpsilon) &&
      (fabs(start.y-end.y) < DrawEpsilon))
    {
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return;
    }
  TracePoint(primitive_info+1,end);
  (primitive_info+1)->primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
}

/*
  TracePath() parses an SVG path string (moveto/lineto/curveto/arc/...)
  into primitive points, returning the number of coordinates emitted (the
  parser's command dispatch continues beyond this line range).
*/
static size_t TracePath(PrimitiveInfo *primitive_info,const char *path)
{
  char
    *next_token,
    token[MagickPathExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0,0.0}, {0.0,0.0}, {0.0,0.0}, {0.0,0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    /* Skip whitespace between path commands. */
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
last_attribute=attribute; attribute=(int) (*p++); switch (attribute) { case 'a': case 'A': { double angle; MagickBooleanType large_arc, sweep; PointInfo arc; /* Compute arc points. */ do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); arc.x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); arc.y=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); angle=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse; GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse; GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); end.x=(double) (attribute == (int) 'A' ? x : point.x+x); end.y=(double) (attribute == (int) 'A' ? y : point.y+y); TraceArcPath(q,point,end,arc,angle,large_arc,sweep); q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'c': case 'C': { /* Compute bezier points. */ do { points[0]=point; for (i=1; i < 4; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); end.x=(double) (attribute == (int) 'C' ? 
x : point.x+x); end.y=(double) (attribute == (int) 'C' ? y : point.y+y); points[i]=end; } for (i=0; i < 4; i++) (q+i)->point=points[i]; TraceBezier(q,4); q+=q->coordinates; point=end; } while (IsPoint(p) != MagickFalse); break; } case 'H': case 'h': { do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); point.x=(double) (attribute == (int) 'H' ? x: point.x+x); TracePoint(q,point); q+=q->coordinates; } while (IsPoint(p) != MagickFalse); break; } case 'l': case 'L': { do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); point.x=(double) (attribute == (int) 'L' ? x : point.x+x); point.y=(double) (attribute == (int) 'L' ? y : point.y+y); TracePoint(q,point); q+=q->coordinates; } while (IsPoint(p) != MagickFalse); break; } case 'M': case 'm': { if (q != primitive_info) { primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; primitive_info=q; } i=0; do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); point.x=(double) (attribute == (int) 'M' ? x : point.x+x); point.y=(double) (attribute == (int) 'M' ? y : point.y+y); if (i == 0) start=point; i++; TracePoint(q,point); q+=q->coordinates; if ((i != 0) && (attribute == (int) 'M')) { TracePoint(q,point); q+=q->coordinates; } } while (IsPoint(p) != MagickFalse); break; } case 'q': case 'Q': { /* Compute bezier points. 
*/ do { points[0]=point; for (i=1; i < 3; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (*p == ',') p++; end.x=(double) (attribute == (int) 'Q' ? x : point.x+x); end.y=(double) (attribute == (int) 'Q' ? y : point.y+y); points[i]=end; } for (i=0; i < 3; i++) (q+i)->point=points[i]; TraceBezier(q,3); q+=q->coordinates; point=end; } while (IsPoint(p) != MagickFalse); break; } case 's': case 'S': { /* Compute bezier points. */ do { points[0]=points[3]; points[1].x=2.0*points[3].x-points[2].x; points[1].y=2.0*points[3].y-points[2].y; for (i=2; i < 4; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (*p == ',') p++; end.x=(double) (attribute == (int) 'S' ? x : point.x+x); end.y=(double) (attribute == (int) 'S' ? y : point.y+y); points[i]=end; } if (strchr("CcSs",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 4; i++) (q+i)->point=points[i]; TraceBezier(q,4); q+=q->coordinates; point=end; } while (IsPoint(p) != MagickFalse); break; } case 't': case 'T': { /* Compute bezier points. */ do { points[0]=points[2]; points[1].x=2.0*points[2].x-points[1].x; points[1].y=2.0*points[2].y-points[1].y; for (i=2; i < 3; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); end.x=(double) (attribute == (int) 'T' ? 
x : point.x+x); end.y=(double) (attribute == (int) 'T' ? y : point.y+y); points[i]=end; } if (strchr("QqTt",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 3; i++) (q+i)->point=points[i]; TraceBezier(q,3); q+=q->coordinates; point=end; } while (IsPoint(p) != MagickFalse); break; } case 'v': case 'V': { do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); point.y=(double) (attribute == (int) 'V' ? y : point.y+y); TracePoint(q,point); q+=q->coordinates; } while (IsPoint(p) != MagickFalse); break; } case 'z': case 'Z': { point=start; TracePoint(q,point); q+=q->coordinates; primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; primitive_info=q; z_count++; break; } default: { if (isalpha((int) ((unsigned char) attribute)) != 0) (void) FormatLocaleFile(stderr,"attribute not recognized: %c\n", attribute); break; } } } primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; for (i=0; i < (ssize_t) number_coordinates; i++) { q--; q->primitive=primitive_type; if (z_count > 1) q->method=FillToBorderMethod; } q=primitive_info; return(number_coordinates); } static void TraceRectangle(PrimitiveInfo *primitive_info,const PointInfo start, const PointInfo end) { PointInfo point; register PrimitiveInfo *p; register ssize_t i; p=primitive_info; TracePoint(p,start); p+=p->coordinates; point.x=start.x; point.y=end.y; TracePoint(p,point); p+=p->coordinates; TracePoint(p,end); p+=p->coordinates; point.x=end.x; point.y=start.y; TracePoint(p,point); p+=p->coordinates; TracePoint(p,start); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } } static void TraceRoundRectangle(PrimitiveInfo *primitive_info, const PointInfo start,const 
PointInfo end,PointInfo arc) { PointInfo degrees, offset, point; register PrimitiveInfo *p; register ssize_t i; p=primitive_info; offset.x=fabs(end.x-start.x); offset.y=fabs(end.y-start.y); if (arc.x > (0.5*offset.x)) arc.x=0.5*offset.x; if (arc.y > (0.5*offset.y)) arc.y=0.5*offset.y; point.x=start.x+offset.x-arc.x; point.y=start.y+arc.y; degrees.x=270.0; degrees.y=360.0; TraceEllipse(p,point,arc,degrees); p+=p->coordinates; point.x=start.x+offset.x-arc.x; point.y=start.y+offset.y-arc.y; degrees.x=0.0; degrees.y=90.0; TraceEllipse(p,point,arc,degrees); p+=p->coordinates; point.x=start.x+arc.x; point.y=start.y+offset.y-arc.y; degrees.x=90.0; degrees.y=180.0; TraceEllipse(p,point,arc,degrees); p+=p->coordinates; point.x=start.x+arc.x; point.y=start.y+arc.y; degrees.x=180.0; degrees.y=270.0; TraceEllipse(p,point,arc,degrees); p+=p->coordinates; TracePoint(p,primitive_info->point); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } } static void TraceSquareLinecap(PrimitiveInfo *primitive_info, const size_t number_vertices,const double offset) { double distance; register double dx, dy; register ssize_t i; ssize_t j; dx=0.0; dy=0.0; for (i=1; i < (ssize_t) number_vertices; i++) { dx=primitive_info[0].point.x-primitive_info[i].point.x; dy=primitive_info[0].point.y-primitive_info[i].point.y; if ((fabs((double) dx) >= DrawEpsilon) || (fabs((double) dy) >= DrawEpsilon)) break; } if (i == (ssize_t) number_vertices) i=(ssize_t) number_vertices-1L; distance=hypot((double) dx,(double) dy); primitive_info[0].point.x=(double) (primitive_info[i].point.x+ dx*(distance+offset)/distance); primitive_info[0].point.y=(double) (primitive_info[i].point.y+ dy*(distance+offset)/distance); for (j=(ssize_t) number_vertices-2; j >= 0; j--) { dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x; 
dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y; if ((fabs((double) dx) >= DrawEpsilon) || (fabs((double) dy) >= DrawEpsilon)) break; } distance=hypot((double) dx,(double) dy); primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+ dx*(distance+offset)/distance); primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+ dy*(distance+offset)/distance); } static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info, const PrimitiveInfo *primitive_info) { typedef struct _LineSegment { double p, q; } LineSegment; double delta_theta, dot_product, mid, miterlimit; LineSegment dx, dy, inverse_slope, slope, theta; MagickBooleanType closed_path; PointInfo box_p[5], box_q[5], center, offset, *path_p, *path_q; PrimitiveInfo *polygon_primitive, *stroke_polygon; register ssize_t i; size_t arc_segments, max_strokes, number_vertices; ssize_t j, n, p, q; /* Allocate paths. */ number_vertices=primitive_info->coordinates; max_strokes=2*number_vertices+6*BezierQuantum+360; path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes, sizeof(*path_p)); path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes, sizeof(*path_q)); polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_vertices+2UL,sizeof(*polygon_primitive)); if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL) || (polygon_primitive == (PrimitiveInfo *) NULL)) { if (path_p != (PointInfo *) NULL) path_p=(PointInfo *) RelinquishMagickMemory(path_p); if (path_q != (PointInfo *) NULL) path_q=(PointInfo *) RelinquishMagickMemory(path_q); if (polygon_primitive != (PrimitiveInfo *) NULL) polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return((PrimitiveInfo *) NULL); } (void) CopyMagickMemory(polygon_primitive,primitive_info,(size_t) number_vertices*sizeof(*polygon_primitive)); closed_path= (fabs(primitive_info[number_vertices-1].point.x-primitive_info[0].point.x) < DrawEpsilon) && 
(fabs(primitive_info[number_vertices-1].point.y-primitive_info[0].point.y) < DrawEpsilon) ? MagickTrue : MagickFalse; if (((draw_info->linejoin == RoundJoin) || (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse)) { polygon_primitive[number_vertices]=primitive_info[1]; number_vertices++; } polygon_primitive[number_vertices].primitive=UndefinedPrimitive; /* Compute the slope for the first line segment, p. */ dx.p=0.0; dy.p=0.0; for (n=1; n < (ssize_t) number_vertices; n++) { dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x; dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y; if ((fabs(dx.p) >= DrawEpsilon) || (fabs(dy.p) >= DrawEpsilon)) break; } if (n == (ssize_t) number_vertices) n=(ssize_t) number_vertices-1L; slope.p=0.0; inverse_slope.p=0.0; if (fabs(dx.p) < DrawEpsilon) { if (dx.p >= 0.0) slope.p=dy.p < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon; else slope.p=dy.p < 0.0 ? 1.0/DrawEpsilon : -1.0/DrawEpsilon; } else if (fabs(dy.p) < DrawEpsilon) { if (dy.p >= 0.0) inverse_slope.p=dx.p < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon; else inverse_slope.p=dx.p < 0.0 ? 
1.0/DrawEpsilon : -1.0/DrawEpsilon; } else { slope.p=dy.p/dx.p; inverse_slope.p=(-1.0/slope.p); } mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid); if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse)) TraceSquareLinecap(polygon_primitive,number_vertices,mid); offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0))); offset.y=(double) (offset.x*inverse_slope.p); if ((dy.p*offset.x-dx.p*offset.y) > 0.0) { box_p[0].x=polygon_primitive[0].point.x-offset.x; box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p; box_p[1].x=polygon_primitive[n].point.x-offset.x; box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p; box_q[0].x=polygon_primitive[0].point.x+offset.x; box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p; box_q[1].x=polygon_primitive[n].point.x+offset.x; box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p; } else { box_p[0].x=polygon_primitive[0].point.x+offset.x; box_p[0].y=polygon_primitive[0].point.y+offset.y; box_p[1].x=polygon_primitive[n].point.x+offset.x; box_p[1].y=polygon_primitive[n].point.y+offset.y; box_q[0].x=polygon_primitive[0].point.x-offset.x; box_q[0].y=polygon_primitive[0].point.y-offset.y; box_q[1].x=polygon_primitive[n].point.x-offset.x; box_q[1].y=polygon_primitive[n].point.y-offset.y; } /* Create strokes for the line join attribute: bevel, miter, round. */ p=0; q=0; path_q[p++]=box_q[0]; path_p[q++]=box_p[0]; for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++) { /* Compute the slope for this line segment, q. */ dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x; dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y; dot_product=dx.q*dx.q+dy.q*dy.q; if (dot_product < 0.25) continue; slope.q=0.0; inverse_slope.q=0.0; if (fabs(dx.q) < DrawEpsilon) { if (dx.q >= 0.0) slope.q=dy.q < 0.0 ? 
-1.0/DrawEpsilon : 1.0/DrawEpsilon; else slope.q=dy.q < 0.0 ? 1.0/DrawEpsilon : -1.0/DrawEpsilon; } else if (fabs(dy.q) < DrawEpsilon) { if (dy.q >= 0.0) inverse_slope.q=dx.q < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon; else inverse_slope.q=dx.q < 0.0 ? 1.0/DrawEpsilon : -1.0/DrawEpsilon; } else { slope.q=dy.q/dx.q; inverse_slope.q=(-1.0/slope.q); } offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0))); offset.y=(double) (offset.x*inverse_slope.q); dot_product=dy.q*offset.x-dx.q*offset.y; if (dot_product > 0.0) { box_p[2].x=polygon_primitive[n].point.x-offset.x; box_p[2].y=polygon_primitive[n].point.y-offset.y; box_p[3].x=polygon_primitive[i].point.x-offset.x; box_p[3].y=polygon_primitive[i].point.y-offset.y; box_q[2].x=polygon_primitive[n].point.x+offset.x; box_q[2].y=polygon_primitive[n].point.y+offset.y; box_q[3].x=polygon_primitive[i].point.x+offset.x; box_q[3].y=polygon_primitive[i].point.y+offset.y; } else { box_p[2].x=polygon_primitive[n].point.x+offset.x; box_p[2].y=polygon_primitive[n].point.y+offset.y; box_p[3].x=polygon_primitive[i].point.x+offset.x; box_p[3].y=polygon_primitive[i].point.y+offset.y; box_q[2].x=polygon_primitive[n].point.x-offset.x; box_q[2].y=polygon_primitive[n].point.y-offset.y; box_q[3].x=polygon_primitive[i].point.x-offset.x; box_q[3].y=polygon_primitive[i].point.y-offset.y; } if (fabs((double) (slope.p-slope.q)) < DrawEpsilon) { box_p[4]=box_p[1]; box_q[4]=box_q[1]; } else { box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+ box_p[3].y)/(slope.p-slope.q)); box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y); box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+ box_q[3].y)/(slope.p-slope.q)); box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y); } if (q >= (ssize_t) (max_strokes-6*BezierQuantum-360)) { if (~max_strokes < (6*BezierQuantum+360)) { path_p=(PointInfo *) RelinquishMagickMemory(path_p); path_q=(PointInfo *) RelinquishMagickMemory(path_q); } 
else { max_strokes+=6*BezierQuantum+360; path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes, sizeof(*path_p)); path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes, sizeof(*path_q)); } if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) { if (path_p != (PointInfo *) NULL) path_p=(PointInfo *) RelinquishMagickMemory(path_p); if (path_q != (PointInfo *) NULL) path_q=(PointInfo *) RelinquishMagickMemory(path_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return((PrimitiveInfo *) NULL); } } dot_product=dx.q*dy.p-dx.p*dy.q; if (dot_product <= 0.0) switch (draw_info->linejoin) { case BevelJoin: { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_p[p++]=box_p[4]; else { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { path_q[q++]=box_q[4]; path_p[p++]=box_p[4]; } else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_p[p++]=box_p[4]; else { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x); theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x); if (theta.q < theta.p) theta.q+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/ (2.0*sqrt((double) (1.0/mid))))); path_q[q].x=box_q[1].x; path_q[q].y=box_q[1].y; q++; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); path_q[q].x=(double) (center.x+mid*cos(fmod((double) 
(theta.p+delta_theta),DegreesToRadians(360.0)))); path_q[q].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); q++; } path_q[q++]=box_q[2]; break; } default: break; } else switch (draw_info->linejoin) { case BevelJoin: { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_q[q++]=box_q[4]; else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { path_q[q++]=box_q[4]; path_p[p++]=box_p[4]; } else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_q[q++]=box_q[4]; else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x); theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x); if (theta.p < theta.q) theta.p+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/ (2.0*sqrt((double) (1.0/mid))))); path_p[p++]=box_p[1]; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); path_p[p].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); path_p[p].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); p++; } path_p[p++]=box_p[2]; break; } default: break; } slope.p=slope.q; inverse_slope.p=inverse_slope.q; box_p[0]=box_p[2]; box_p[1]=box_p[3]; box_q[0]=box_q[2]; box_q[1]=box_q[3]; dx.p=dx.q; dy.p=dy.q; n=i; } path_p[p++]=box_p[1]; path_q[q++]=box_q[1]; /* Trace stroked polygon. 
*/ stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon)); if (stroke_polygon != (PrimitiveInfo *) NULL) { for (i=0; i < (ssize_t) p; i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=path_p[i]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; } for ( ; i < (ssize_t) (p+q+closed_path); i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[p+closed_path].point; i++; } stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; stroke_polygon[i].primitive=UndefinedPrimitive; stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1); } path_p=(PointInfo *) RelinquishMagickMemory(path_p); path_q=(PointInfo *) RelinquishMagickMemory(path_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return(stroke_polygon); }
special_random_ops.h
//
// @author raver119@gmail.com
//

#ifndef LIBND4J_SPECIAL_RANDOM_OPS_H
#define LIBND4J_SPECIAL_RANDOM_OPS_H

#include <ops/random_ops.h>
#include <helpers/shape.h>

namespace randomOps {

//////////////////////////////////////////////////////////////////////
    // Weighted random sampling: for each output element, draw a uniform
    // value and walk the cumulative probabilities in Y to pick an element
    // of X.  Marked requiresSpecial so the framework dispatches to the
    // special*Op entry points below instead of the element-wise path.
    template<typename T>
    class Choice {
    public:

        method_idx
        method_X
        method_XY

        static const bool requiresSpecial = true;

#ifdef __CUDACC__
        __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
            /**
             * X holds data,
             * Y holds probabilities
             * Z will hold results
             */

            // TODO: we probably might want to skip this sum, and state that probabilities array should be real probabilities, i.e. should sum to 1.0
            //T probSum = extraArguments[0];

            __shared__ Nd4jLong xLength;
            __shared__ Nd4jLong yLength;
            __shared__ Nd4jLong zLength;

            __shared__ Nd4jLong xEWS;
            __shared__ Nd4jLong yEWS;
            __shared__ Nd4jLong zEWS;

            __shared__ nd4j::random::RandomBuffer *buffer;
            __shared__ unsigned char *cB;
            __shared__ unsigned char *dB;
            __shared__ nd4j::random::RandomBuffer *devBuffer;
            if (threadIdx.x == 0) {
                extern __shared__ unsigned char shmem[];
                // shared-memory copy of the RNG buffer; devBuffer aliases the
                // original in global memory so it can be rewound at the end
                buffer = (nd4j::random::RandomBuffer *) shmem;
                cB = shmem;
                devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
                dB = reinterpret_cast<unsigned char *> (state);

                xLength = shape::length(xShapeBuffer);
                yLength = shape::length(yShapeBuffer);
                zLength = shape::length(zShapeBuffer);

                xEWS = shape::elementWiseStride(xShapeBuffer);
                yEWS = shape::elementWiseStride(yShapeBuffer);
                zEWS = shape::elementWiseStride(zShapeBuffer);
            }
            __syncthreads();

            // using this loop instead of memcpy
            for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) {
                cB[e] = dB[e];
            }
            __syncthreads();

            int tid = blockIdx.x * blockDim.x + threadIdx.x;

            if (zEWS >= 1 && xEWS >= 1 && yEWS >= 1) {
                for (Nd4jLong e = tid; e < zLength; e+=blockDim.x * gridDim.x) {
                    T prob = buffer->relativeT<T>(e);
                    T cumProb = (T) 0.0f;
                    for (Nd4jLong f = 0; f < yLength; f++) {
                        T relProb = y[f * yEWS];
                        cumProb += relProb;

                        if (prob <= cumProb || f == yLength - 1) {
                            z[e * zEWS] = x[f * xEWS];
                            f += yLength;   // force loop exit (acts as break)
                        }
                        // NOTE(review): __syncthreads() inside a loop whose trip
                        // count differs per thread is divergence-sensitive --
                        // verify all threads of a block reach it, else deadlock.
                        __syncthreads();
                    }
                    __syncthreads();
                }
            } else {
                // generic (non-unit-stride) path via coordinate transforms
                Nd4jLong xCoord[MAX_RANK];
                Nd4jLong yCoord[MAX_RANK];
                Nd4jLong zCoord[MAX_RANK];

                __shared__ int xRank;
                __shared__ int yRank;
                __shared__ int zRank;

                __shared__ Nd4jLong *xShape;
                __shared__ Nd4jLong *yShape;
                __shared__ Nd4jLong *zShape;
                __shared__ Nd4jLong *xStride;
                __shared__ Nd4jLong *yStride;
                __shared__ Nd4jLong *zStride;

                if (threadIdx.x == 0) {
                    xRank = shape::rank(xShapeBuffer);
                    yRank = shape::rank(yShapeBuffer);
                    zRank = shape::rank(zShapeBuffer);

                    xShape = shape::shapeOf(xShapeBuffer);
                    yShape = shape::shapeOf(yShapeBuffer);
                    zShape = shape::shapeOf(zShapeBuffer);

                    xStride = shape::stride(xShapeBuffer);
                    yStride = shape::stride(yShapeBuffer);
                    zStride = shape::stride(zShapeBuffer);
                }
                __syncthreads();

                for (Nd4jLong i = tid; i < zLength; i+=blockDim.x * gridDim.x) {
                    shape::ind2sub(zRank, zShape, i, zCoord);

                    auto zOffset2 = shape::getOffset(0, zShape, zStride, zCoord, zRank);

                    T prob = buffer->relativeT<T>(i);
                    T cumProb = (T) 0.0f;
                    for (Nd4jLong f = 0; f < yLength; f++) {
                        // NOTE(review): y is mapped from 'i' (the z index), not the
                        // loop variable 'f' -- yOffset2 is constant across this
                        // loop.  Looks like it should be 'f'; confirm upstream.
                        shape::ind2sub(yRank, yShape, i, yCoord);
                        auto yOffset2 = shape::getOffset(0, yShape, yStride, yCoord, yRank);

                        T relProb = y[yOffset2];
                        cumProb += relProb;

                        if (prob <= cumProb || f == yLength - 1) {
                            shape::ind2sub(xRank, xShape, f, xCoord);
                            auto xOffset2 = shape::getOffset(0, xShape, xStride, xCoord, xRank);

                            z[zOffset2] = x[xOffset2];
                            f += yLength;   // force loop exit (acts as break)
                        }
                        __syncthreads();
                    }
                    __syncthreads();
                }
            }

            __syncthreads();
            devBuffer->rewind(zLength);
        }
#endif

        static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
            /**
             * X holds data,
             * Y holds probabilities
             * Z will hold results
             */
            nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
            // TODO: we probably might want to skip this sum, and state that probabilities array should be real probabilities, i.e. should sum to 1.0
            //T probSum = extraArguments[0];

            Nd4jLong yLength = shape::length(yShapeBuffer);
            Nd4jLong zLength = shape::length(zShapeBuffer);

            auto xEWS = shape::elementWiseStride(xShapeBuffer);
            auto yEWS = shape::elementWiseStride(yShapeBuffer);
            auto zEWS = shape::elementWiseStride(zShapeBuffer);

            // thread count scales with output size, capped by OMP max
            int elementsPerThread = zLength / TAD_THRESHOLD;
            int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
            _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads());

            if (zEWS >= 1 && xEWS >= 1 && yEWS >= 1) {
#pragma omp parallel for num_threads(_threads) if (_threads > 1) schedule(guided)
                for (Nd4jLong e = 0; e < zLength; e++) {
                    T prob = buffer->relativeT<T>(e);
                    T cumProb = (T) 0.0f;
                    for (Nd4jLong f = 0; f < yLength; f++) {
                        T relProb = y[f * yEWS];
                        cumProb += relProb;

                        if (prob <= cumProb || f == yLength - 1) {
                            z[e * zEWS] = x[f * xEWS];
                            f += yLength;   // force loop exit (acts as break)
                        }
                    }
                }
            } else {
                // generic (non-unit-stride) path via coordinate transforms
                Nd4jLong xCoord[MAX_RANK];
                Nd4jLong yCoord[MAX_RANK];
                Nd4jLong zCoord[MAX_RANK];

                int xRank = shape::rank(xShapeBuffer);
                int yRank = shape::rank(yShapeBuffer);
                int zRank = shape::rank(zShapeBuffer);

                auto xShape = shape::shapeOf(xShapeBuffer);
                auto yShape = shape::shapeOf(yShapeBuffer);
                auto zShape = shape::shapeOf(zShapeBuffer);

                auto xStride = shape::stride(xShapeBuffer);
                auto yStride = shape::stride(yShapeBuffer);
                auto zStride = shape::stride(zShapeBuffer);

#pragma omp parallel for num_threads(_threads) if (_threads > 1) schedule(guided)
                for (Nd4jLong i = 0; i < zLength; i++) {
                    shape::ind2sub(zRank, zShape, i, zCoord);

                    auto zOffset2 = shape::getOffset(0, zShape, zStride, zCoord, zRank);

                    T prob = buffer->relativeT<T>(i);
                    T cumProb = (T) 0.0f;
                    for (Nd4jLong f = 0; f < yLength; f++) {
                        // NOTE(review): y is mapped from 'i' (the z index), not the
                        // loop variable 'f' -- yOffset2 is constant across this
                        // loop.  Looks like it should be 'f'; confirm upstream.
                        shape::ind2sub(yRank, yShape, i, yCoord);
                        auto yOffset2 = shape::getOffset(0, yShape, yStride, yCoord, yRank);

                        T relProb = y[yOffset2];
                        cumProb += relProb;

                        if (prob <= cumProb || f == yLength - 1) {
                            shape::ind2sub(xRank, xShape, f, xCoord);
                            Nd4jLong xOffset2 = shape::getOffset(0, xShape, xStride, xCoord, xRank);

                            z[zOffset2] = x[xOffset2];
                            f += yLength;   // force loop exit (acts as break)
                        }
                    }
                }
            }

            // update rng state
            buffer->rewindH(zLength);
        }
    };


//////////////////////////////////////////////////////////////////////
    /**
     * This Op produces random values within specified boundaries. Distribuion is Gaussian
     */
    template<typename T>
    class GaussianDistribution {
    public:

        method_XY
        method_X
        method_idx

        static const bool requiresSpecial = true;

#ifdef __CUDACC__
        __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
            // Box-Muller transform: each pair of uniforms yields a
            // (cos, sin) pair of normal deviates.
            __shared__ T epsilon;
            __shared__ T two_pi;

            __shared__ Nd4jLong zLength;
            __shared__ Nd4jLong zEWS;
            __shared__ Nd4jLong yEWS;
            __shared__ T mean;
            __shared__ T stddev;
            __shared__ int step;

            __shared__ T *tZ;

            __shared__ nd4j::random::RandomBuffer *buffer;
            __shared__ unsigned char *cB;
            __shared__ unsigned char *dB;
            __shared__ nd4j::random::RandomBuffer *devBuffer;

            if (threadIdx.x == 0) {
                extern __shared__ unsigned char shmem[];
                buffer = reinterpret_cast<nd4j::random::RandomBuffer *>(shmem);
                cB = shmem;
                devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
                dB = reinterpret_cast<unsigned char *> (state);

                // per-thread uniform scratch lives after the RNG copy in shmem
                tZ = reinterpret_cast<T *>(shmem + sizeof(nd4j::random::RandomBuffer));

                zLength = shape::length(zShapeBuffer);
                zEWS = shape::elementWiseStride(zShapeBuffer);
                yEWS = shape::elementWiseStride(yShapeBuffer);


                epsilon = static_cast<T>(1e-5);
                two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846);

                mean = extraArguments[0];
                stddev = extraArguments[1];

                step = (blockDim.x * gridDim.x);
            }
            __syncthreads();

            // using this loop instead of memcpy
            for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) {
                cB[e] = dB[e];
            }
            __syncthreads();

            Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x;

            for (Nd4jLong e = tid; e < zLength; e += step) {
                // we need to get random values
                tZ[threadIdx.x] = buffer->relativeT<T>(e, epsilon, static_cast<T>(1.0f));

                // fix for "next rng value"
                if (e + 1 >= zLength && e % 2 == 0) {
                    // NOTE(review): writes tZ[threadIdx.x+1] -- for the last
                    // thread of a block this is one past its own slot; verify
                    // the shmem allocation covers it.
                    tZ[threadIdx.x+1] = buffer->relativeT<T>(e+1, epsilon, static_cast<T>(1.0f));
                }

                T realMean = y == z ? mean : y[e * yEWS];

                __syncthreads();

                // even index -> cos branch, odd index -> sin branch of the pair
                if (e % 2 == 0)
                    z[e *zEWS] =  (nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(tZ[threadIdx.x])) * nd4j::math::nd4j_cos<T>(two_pi * tZ[threadIdx.x+1])) * stddev + realMean;
                else
                    z[e *zEWS] =  (nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(tZ[threadIdx.x-1])) * nd4j::math::nd4j_sin<T>(two_pi * tZ[threadIdx.x])) * stddev + realMean;

                __syncthreads();
            }

            __syncthreads();
            devBuffer->rewind(zLength);
        }
#endif

        static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
            // CPU Box-Muller: each thread works an even-sized chunk so that
            // uniform pairs never straddle a chunk boundary.
            const T two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846);

            auto zLength = shape::length(zShapeBuffer);
            auto yEWS = shape::elementWiseStride(yShapeBuffer);
            auto zEWS = shape::elementWiseStride(zShapeBuffer);

            int elementsPerThread = zLength / TAD_THRESHOLD;
            int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
            _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads());

            int span = (zLength / _threads) + 8;

            // we're enforcing even chunks, since it's mandatory for this algorithm
            span -= span % 2;

            nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);

            T mean = extraArguments[0];
            T stddev = extraArguments[1];

#pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread)
            {
                int tid = omp_get_thread_num();
                Nd4jLong start = span * tid;
                Nd4jLong end = span * (tid + 1);
                if (end > zLength) end = zLength;

                T z0, z1;
                T u0, u1;
                T lnU0;

                bool generated = false;

                for (Nd4jLong e = start; e < end; e++) {
                    if (!generated) {
                        /*
                         * Since box-muller transform expects non-zero u0 value, we'll just use rng with boundaries
                         */
                        u0 = buffer->relativeT<T>(e, static_cast<T>(1e-5f), static_cast<T>(1.0f));
                        u1 = buffer->relativeT<T>((e + 1), static_cast<T>(1e-5f), static_cast<T>(1.0f));

                        lnU0 = nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(u0));
                        z0 = lnU0 * nd4j::math::nd4j_cos<T>(two_pi * u1);
                        z1 = lnU0 * nd4j::math::nd4j_sin<T>(two_pi * u1);

                        generated = true;

                        T realMean = y == z ? mean : y[e * yEWS];

                        z[e * zEWS] = z0 * stddev + realMean;
                    } else {
                        // second deviate of the pair, no new uniforms drawn
                        T realMean = y == z ? mean : y[e * yEWS];

                        z[e * zEWS] = z1 * stddev + realMean;

                        generated = false;
                    }
                }
            }

            // update rng state
            buffer->rewindH(zLength);
        }
    };


//////////////////////////////////////////////////////////////////////
    /**
     * This Op produces random values within [0..N], Distribuion is binomial
     */
    template<typename T>
    class BinomialDistribution {
    public:

        method_XY
        method_X
        method_idx

        static const bool requiresSpecial = true;

#ifdef __CUDACC__
        __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
            // extraArguments: [0] = number of trials, [1] = success probability
            // (per-element probabilities come from Y when Y != Z)
            int trials = (int) extraArguments[0];

            T prob = extraArguments[1];

            __shared__ Nd4jLong zLength;
            __shared__ int yEWS;
            __shared__ int zEWS;

            __shared__ nd4j::random::RandomBuffer *buffer;
            __shared__ unsigned char *cB;
            __shared__ unsigned char *dB;
            __shared__ nd4j::random::RandomBuffer *devBuffer;
            if (threadIdx.x == 0) {
                extern __shared__ unsigned char shmem[];
                buffer = reinterpret_cast<nd4j::random::RandomBuffer *>(shmem);
                cB = shmem;
                devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *>(state);
                dB = reinterpret_cast<unsigned char *> (state);

                zLength = shape::length(zShapeBuffer);

                yEWS = shape::elementWiseStride(yShapeBuffer);
                zEWS = shape::elementWiseStride(zShapeBuffer);
            }
            __syncthreads();

            // using this loop instead of memcpy
            for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) {
                cB[e] = dB[e];
            }
            __syncthreads();

            int tid = blockIdx.x * blockDim.x + threadIdx.x;

            for (Nd4jLong e = tid; e < zLength; e += blockDim.x * gridDim.x) {
                int success = 0;
                for (int t = 1; t <= trials; t++) {
                    T randVal = buffer->relativeT<T>((e+1) * t);
                    if (y != z) {
                        // we're using external probs
                        prob = y[(t-1) * yEWS];
                    }

                    if (randVal < prob)
                        success++;
                }

                // we need this, to eliminate excessive code branching in runtime
                __syncthreads();

                // if trials is set to 0, effectively we just have successful memset
                z[e * zEWS] = static_cast<T>(success);
            }

            __syncthreads();
            if (trials > 0)
                devBuffer->rewind(zLength * trials);
        }
#endif

        static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
            // extraArguments: [0] = number of trials, [1] = success probability
            int trials = (int) extraArguments[0];

            Nd4jLong zLength = shape::length(zShapeBuffer);

            auto yEWS = shape::elementWiseStride(yShapeBuffer);
            auto zEWS = shape::elementWiseStride(zShapeBuffer);

            int elementsPerThread = zLength / TAD_THRESHOLD;
            int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
            _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads());

            int span = (zLength / _threads) + 8;

            nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);

#pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread)
            {
                int tid = omp_get_thread_num();
                Nd4jLong start = span * tid;
                Nd4jLong end = span * (tid + 1);
                if (end > zLength) end = zLength;

                T prob = extraArguments[1];

                for (Nd4jLong e = start; e < end; e++) {

                    int success = 0;
                    for (int t = 1; t <= trials; t++) {
                        T randVal = buffer->relativeT<T>((e+1) * t);
                        if (y != z) {
                            // we're using external probs
                            prob = y[(t-1) * yEWS];
                        }

                        if (randVal < prob)
                            success++;
                    }

                    // if trials is set to 0, effectively we just have successful memset
                    z[e * zEWS] = static_cast<T>(success);
                }
            }

            // update rng state
            if (trials > 0)
                buffer->rewindH(zLength * trials);
        }
    };


//////////////////////////////////////////////////////////////////////
    /**
     * This Op produces random values within
[0..N], Distribuion is binomial */ template<typename T> class BinomialDistributionEx { public: method_XY method_X method_idx static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { int trials = (int) extraArguments[0]; T prob = extraArguments[1]; __shared__ Nd4jLong zLength; __shared__ int yEWS; __shared__ int zEWS; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = (nd4j::random::RandomBuffer *) shmem; cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); zLength = shape::length(zShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong e = tid; e < zLength; e += blockDim.x * gridDim.x) { int success = 0; for (int t = 1; t <= trials; t++) { T randVal = buffer->relativeT<T>((e+1) * t); if (y != z) { // we're using external probs prob = y[e * yEWS]; } if (randVal < prob) success++; } // we need this, to eliminate excessive code branching in runtime __syncthreads(); // if trials is set to 0, effectively we just have successful memset z[e * zEWS] = (T) success; } __syncthreads(); if (trials > 0) devBuffer->rewind(zLength * trials); } #endif static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { int trials = (int) extraArguments[0]; Nd4jLong zLength = 
shape::length(zShapeBuffer); auto yEWS = shape::elementWiseStride(yShapeBuffer); auto zEWS = shape::elementWiseStride(zShapeBuffer); int elementsPerThread = zLength / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); auto span = (zLength / _threads) + 8; nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); #pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread) { int tid = omp_get_thread_num(); Nd4jLong start = span * tid; Nd4jLong end = span * (tid + 1); if (end > zLength) end = zLength; T prob = extraArguments[1]; for (Nd4jLong e = start; e < end; e++) { int success = 0; for (int t = 1; t <= trials; t++) { T randVal = buffer->relativeT<T>((e+1) * t); if (y != z) { // we're using external probs prob = y[e * yEWS]; } if (randVal < prob) success++; } // if trials is set to 0, effectively we just have successful memset z[e * zEWS] = static_cast<T>(success); } } // update rng state if (trials > 0) buffer->rewindH(zLength * trials); } }; ////////////////////////////////////////////////////////////////////// // This Op produces random Gaussian values within [mean-2*stddev,mean+2*stddev] template<typename T> class TruncatedNormalDistribution { public: method_XY method_X method_idx static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { __shared__ T epsilon; __shared__ T two_pi; __shared__ Nd4jLong zLength; __shared__ Nd4jLong zEWS; __shared__ Nd4jLong yEWS; __shared__ T mean; __shared__ T stddev; __shared__ int step; __shared__ T *tZ; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ 
unsigned char shmem[]; buffer = reinterpret_cast<nd4j::random::RandomBuffer *>(shmem); cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); tZ = reinterpret_cast<T *>(shmem + sizeof(nd4j::random::RandomBuffer)); zLength = shape::length(zShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); epsilon = static_cast<T>(1e-6f); two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846); mean = extraArguments[0]; stddev = extraArguments[1]; step = (blockDim.x * gridDim.x); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; int middle = zLength % 2 == 0 ? zLength / 2 : zLength / 2 + 1; T result0, result1, u0, u1, z0, z1, uT, uP; T ds = nd4j::math::nd4j_abs<T>(stddev) * static_cast<T>(2.0f); for (Nd4jLong e = tid; e < middle; e += step) { // we need to get random values Nd4jLong generation0 = 0; auto epm = e + middle; T realMean0 = y == z ? mean : y[e * yEWS]; T realMean1 = y == z ? 
mean : y[epm * yEWS]; T aRealMean0 = nd4j::math::nd4j_abs<T>(realMean0); T aRealMean1 = nd4j::math::nd4j_abs<T>(realMean1); do { u0 = buffer->relativeT<T>(e + generation0, epsilon, static_cast<T>(1.0f)); u1 = buffer->relativeT<T>(epm + generation0, epsilon, static_cast<T>(1.0f)); uT = nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(u0)); uP = two_pi * u1; z0 = uT * nd4j::math::nd4j_cos<T>(uP); z1 = uT * nd4j::math::nd4j_sin<T>(uP); result0 = z0 * stddev + realMean0; result1 = z1 * stddev + realMean1; generation0 += zLength; } while (ds < aRealMean0 + nd4j::math::nd4j_abs<T>(result0) || aRealMean1 + nd4j::math::nd4j_abs<T>(result1) > ds); z[e * zEWS] = result0; if((epm) < zLength) z[epm * zEWS] = result1; } __syncthreads(); devBuffer->rewind(zLength); } #endif static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { const T two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846); Nd4jLong zLength = shape::length(zShapeBuffer); auto yEWS = shape::elementWiseStride(yShapeBuffer); auto zEWS = shape::elementWiseStride(zShapeBuffer); auto middle = zLength % 2 == 0 ? 
zLength / 2 : zLength / 2 + 1; int elementsPerThread = middle / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); int span = (middle / _threads) + 8; // we're enforcing even chunks, since it's mandatory for this algorithm span -= span % 2; nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); T mean = extraArguments[0]; T stddev = extraArguments[1]; #pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread) { int tid = omp_get_thread_num(); Nd4jLong start = span * tid; Nd4jLong end = span * (tid + 1); if (end > middle) { end = middle; } T z0, z1; T u0, u1; T result0, result1, lnu0, lnu1; T ds = nd4j::math::nd4j_abs<T>(stddev) * (T) 2.0f; for (Nd4jLong e = start; e < end; e++) { /* * Since box-muller transform expects non-zero u0 value, we'll just use rng with boundaries */ Nd4jLong generation0 = 0; auto epm = e + middle; T realMean0 = y == z ? mean : y[e * yEWS]; T realMean1 = y == z ? 
mean : y[epm * yEWS]; T aRealMean0 = nd4j::math::nd4j_abs<T>(realMean0); T aRealMean1 = nd4j::math::nd4j_abs<T>(realMean1); do { u0 = buffer->relativeT<T>(e + generation0, static_cast<T>(1e-6f), static_cast<T>(1.0f)); u1 = buffer->relativeT<T>((epm + generation0), static_cast<T>(1e-6f), static_cast<T>(1.0f)); lnu0 = nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(u0)); lnu1 = two_pi * u1; z0 = lnu0 * nd4j::math::nd4j_cos<T>(lnu1); z1 = lnu0 * nd4j::math::nd4j_sin<T>(lnu1); result0 = z0 * stddev + realMean0; result1 = z1 * stddev + realMean1; generation0 += zLength; } while (aRealMean0 + nd4j::math::nd4j_abs<T>(result0) > ds || aRealMean1 + nd4j::math::nd4j_abs<T>(result1) > ds); z[e*zEWS] = result0; if(epm < zLength) z[epm * zEWS] = result1; } } // update rng state buffer->rewindH(zLength); } }; ////////////////////////////////////////////////////////////////////// // This Op produces random Log-normal distribution template<typename T> class LogNormalDistribution { public: method_XY method_X method_idx static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { __shared__ T epsilon; __shared__ T two_pi; __shared__ Nd4jLong zLength; __shared__ Nd4jLong zEWS; __shared__ Nd4jLong yEWS; __shared__ T mean; __shared__ T stddev; __shared__ int step; __shared__ T *tZ; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = reinterpret_cast<nd4j::random::RandomBuffer *>(shmem); cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); tZ = reinterpret_cast<T*>(shmem + sizeof(nd4j::random::RandomBuffer)); zLength = 
shape::length(zShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); epsilon = static_cast<T>(1e-5); two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846); mean = extraArguments[0]; stddev = extraArguments[1]; step = (blockDim.x * gridDim.x); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong e = tid; e < zLength; e += step) { // we need to get random values tZ[threadIdx.x] = buffer->relativeT<T>(e, epsilon, static_cast<T>(1.0f)); // fix for "next rng value" if (e + 1 >= zLength && e % 2 == 0) { tZ[threadIdx.x+1] = buffer->relativeT<T>(e+1, epsilon, static_cast<T>(1.0f)); } T realMean = y == z ? mean : y[e * yEWS]; __syncthreads(); if (e % 2 == 0) z[e *zEWS] = nd4j::math::nd4j_exp<T>((nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(tZ[threadIdx.x])) * nd4j::math::nd4j_cos<T>(two_pi * tZ[threadIdx.x+1])) * stddev + realMean); else z[e *zEWS] = nd4j::math::nd4j_exp<T>((nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(tZ[threadIdx.x-1])) * nd4j::math::nd4j_sin<T>(two_pi * tZ[threadIdx.x])) * stddev + realMean); __syncthreads(); } __syncthreads(); devBuffer->rewind(zLength); } #endif static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { const T two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846); Nd4jLong zLength = shape::length(zShapeBuffer); auto yEWS = shape::elementWiseStride(yShapeBuffer); auto zEWS = shape::elementWiseStride(zShapeBuffer); int elementsPerThread = zLength / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); int span = (zLength / 
_threads) + 8; // we're enforcing even chunks, since it's mandatory for this algorithm span -= span % 2; auto buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); T mean = extraArguments[0]; T stddev = extraArguments[1]; #pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread) { int tid = omp_get_thread_num(); Nd4jLong start = span * tid; Nd4jLong end = span * (tid + 1); if (end > zLength) end = zLength; T z0, z1; T u0, u1; T lnU0; bool generated = false; for (Nd4jLong e = start; e < end; e++) { if (!generated) { /* * Since box-muller transform expects non-zero u0 value, we'll just use rng with boundaries */ u0 = buffer->relativeT<T>(e, static_cast<T>(1e-5f), static_cast<T>(1.0f)); u1 = buffer->relativeT<T>((e + 1), static_cast<T>(1e-5f), static_cast<T>(1.0f)); lnU0 = nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(u0)); z0 = lnU0 * nd4j::math::nd4j_cos<T>(two_pi * u1); z1 = lnU0 * nd4j::math::nd4j_sin<T>(two_pi * u1); generated = true; T realMean = y == z ? mean : y[e * yEWS]; z[e * zEWS] = nd4j::math::nd4j_exp<T>(z0 * stddev + realMean); } else { T realMean = y == z ? mean : y[e * yEWS]; z[e * zEWS] = nd4j::math::nd4j_exp<T>(z1 * stddev + realMean); generated = false; } } } // update rng state buffer->rewindH(zLength); } }; } #endif //LIBND4J_SPECIAL_RANDOM_OPS_H
if_clause_modificado.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * Demo of the OpenMP `if` clause: the parallel region is only executed by a
 * team of threads when n > 4; otherwise it runs on a single thread.
 *
 * argv[1] = number of iterations (capped at the capacity of a[])
 * argv[2] = number of threads to request
 */
int main(int argc, char **argv)
{
   enum { MAX_N = 20 };          /* fixed capacity of a[] */
   int a[MAX_N];
   int i, n, tid, x;
   int suma = 0, sumalocal;

   if (argc < 3) {
      fprintf(stderr, "[ERROR]-Falta iteraciones\n");
      exit(-1);
   }

   n = atoi(argv[1]);
   if (n > MAX_N) n = MAX_N;     /* never index past the end of a[] */
   if (n < 0) n = 0;             /* reject negative iteration counts */

   for (i = 0; i < n; i++) {
      a[i] = i;
   }

   x = atoi(argv[2]);
   if (x < 1) x = 1;             /* num_threads() requires a positive argument */

   /* if(n>4): skip the parallelization overhead for tiny workloads */
   #pragma omp parallel num_threads(x) if(n>4) default(none)\
           private(sumalocal,tid) shared(a,suma,n)
   {
      sumalocal = 0;
      tid = omp_get_thread_num();

      /* nowait: threads proceed straight to the atomic update below,
         no implied barrier at the end of the worksharing loop */
      #pragma omp for private(i) schedule(static) nowait
      for (i = 0; i < n; i++) {
         sumalocal += a[i];
         printf( "thread %d suma de a[%d]=%d sumalocal=%d \n",
                 tid, i, a[i], sumalocal);
      }

      /* accumulate each thread's partial sum into the shared total */
      #pragma omp atomic
      suma += sumalocal;

      /* all partial sums must be in before the master prints the total */
      #pragma omp barrier
      #pragma omp master
      printf("thread master=%d imprime suma=%d\n", tid, suma);
   }

   return 0;
}
top_k_v2_op.h
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* The reason why we need the topk v2 is because the compatibility. We redefine the NaN is maximum value in the process of comparing. If do not add the topk v2, will affect the inference result of model that traing by the older version paddlepaddle. */ #pragma once #include <algorithm> #include <iostream> #include <utility> #include <vector> #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/top_k_op.h" #include "paddle/fluid/operators/transpose_op.h" namespace paddle { namespace operators { inline void GetDims(const framework::DDim& dim, int axis, int* pre, int* n, int* post) { *pre = 1; *post = 1; *n = dim[axis]; for (int i = 0; i < axis; ++i) { (*pre) *= dim[i]; } for (int i = axis + 1; i < dim.size(); ++i) { (*post) *= dim[i]; } } template <typename T, typename Type> static void FullTopK(Type input_height, Type input_width, int input_dim, const framework::Tensor* input, T* t_out, Type* t_indices, const int& k, const bool& largest, const bool& sorted) { // when the k is small, will the partial sort bool partial_sort_flag = (k * 64) < input_width; #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif // Eigen::DSizes<int, 2> flat2dims(input_height, input_width); for (Type i = 0; i < input_height; ++i) { std::vector<std::pair<T, Type>> col_vec; col_vec.reserve(input_width); if (input_dim == 1) { auto e_input = 
framework::EigenVector<T>::Flatten(*input); for (Type j = 0; j < input_width; ++j) { col_vec.emplace_back(std::pair<T, Type>(e_input(j), j)); } } else { auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1); for (Type j = 0; j < input_width; ++j) { col_vec.emplace_back(std::pair<T, Type>(e_input(i, j), j)); } } if (partial_sort_flag) { std::partial_sort( col_vec.begin(), col_vec.begin() + k, col_vec.end(), [&largest](const std::pair<T, Type>& l, const std::pair<T, Type>& r) { if (largest) { return (std::isnan(static_cast<double>(l.first)) && !std::isnan(static_cast<double>(r.first))) || (l.first > r.first); } else { return (!std::isnan(static_cast<double>(l.first)) && std::isnan(static_cast<double>(r.first))) || (l.first < r.first); } }); } else { // use the nth-element to get the K-larger or K-small element if (largest) { std::nth_element( col_vec.begin(), col_vec.begin() + k - 1, col_vec.end(), [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) { return (std::isnan(static_cast<double>(l.first)) && !std::isnan(static_cast<double>(r.first))) || (l.first > r.first); }); // the nth-element will get the unorder elements, sort the element if (sorted) { std::sort(col_vec.begin(), col_vec.begin() + k - 1, [&largest](const std::pair<T, Type>& l, const std::pair<T, Type>& r) { return (std::isnan(static_cast<double>(l.first)) && !std::isnan(static_cast<double>(r.first))) || (l.first > r.first); }); } } else { std::nth_element( col_vec.begin(), col_vec.begin() + k - 1, col_vec.end(), [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) { return (!std::isnan(static_cast<double>(l.first)) && std::isnan(static_cast<double>(r.first))) || (l.first < r.first); }); // the nth-element will get the unorder elements, sort the element if (sorted) { std::sort( col_vec.begin(), col_vec.begin() + k - 1, [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) { return (!std::isnan(static_cast<double>(l.first)) && 
std::isnan(static_cast<double>(r.first))) || (l.first < r.first); }); } } } for (Type j = 0; j < k; ++j) { t_out[i * k + j] = col_vec[j].first; t_indices[i * k + j] = col_vec[j].second; } } } template <typename T, typename Type> static void FullTopKAssign(const Type& input_height, const Type& input_width, const int& input_dim, const framework::Tensor* input, const framework::Tensor* indices, T* output_data, const int& k) { #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (Type i = 0; i < input_height; ++i) { if (input_dim == 1) { auto e_input = framework::EigenVector<T>::Flatten(*input); auto e_indices = framework::EigenVector<Type>::Flatten(*indices); for (Type j = 0; j < k; ++j) { output_data[i * input_width + e_indices(j)] = e_input(j); } } else { auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1); auto e_indices = framework::EigenMatrix<Type>::Reshape(*indices, input_dim - 1); for (Type j = 0; j < k; ++j) { output_data[i * input_width + e_indices(i, j)] = e_input(i, j); } } } } template <typename DeviceContext, typename T> class TopkV2Kernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { // Get the top k elements of each row of input tensor auto* input = context.Input<Tensor>("X"); auto* output = context.Output<Tensor>("Out"); auto* indices = context.Output<Tensor>("Indices"); const auto& in_dims = input->dims(); int k = static_cast<int>(context.Attr<int>("k")); const auto& sorted = static_cast<bool>(context.Attr<bool>("sorted")); const auto& largest = static_cast<bool>(context.Attr<bool>("largest")); // axis < 0, cacluate the real axis int axis = static_cast<int>(context.Attr<int>("axis")); if (axis < 0) axis += in_dims.size(); // if K tensor is not null, will the use K tesnor as k auto* k_t = context.Input<Tensor>("K"); if (k_t) { k = k_t->data<int>()[0]; framework::DDim output_dims = output->dims(); // accroding to axis to set K value in the dim 
output_dims[axis] = k; output->Resize(output_dims); indices->Resize(output_dims); } T* output_data = output->mutable_data<T>(context.GetPlace()); int64_t* indices_data = indices->mutable_data<int64_t>(context.GetPlace()); const auto& out_dims = output->dims(); if (axis + 1 == in_dims.size()) { const int64_t& input_height = framework::product( framework::slice_ddim(in_dims, 0, in_dims.size() - 1)); const int64_t& input_width = in_dims[in_dims.size() - 1]; FullTopK<T, int64_t>(input_height, input_width, in_dims.size(), input, output_data, indices_data, k, largest, sorted); } else { // if the topk dims is not last dim, will tranpose and do topk std::vector<int> trans; for (int i = 0; i < axis; i++) { trans.emplace_back(i); } trans.push_back(in_dims.size() - 1); for (int i = axis + 1; i < in_dims.size() - 1; i++) { trans.emplace_back(i); } trans.emplace_back(axis); // get the trans input_dims, out_dims framework::DDim trans_dims(in_dims); framework::DDim trans_out_dims(output->dims()); for (size_t i = 0; i < trans.size(); i++) { trans_dims[i] = in_dims[trans[i]]; } for (size_t i = 0; i < trans.size(); i++) { trans_out_dims[i] = out_dims[trans[i]]; } Tensor trans_inp; trans_inp.mutable_data<T>(trans_dims, context.GetPlace()); int ndims = trans.size(); auto& dev_context = context.template device_context<platform::CPUDeviceContext>(); // transpose the input value TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, *input, &trans_inp, trans); const int64_t input_height = framework::product( framework::slice_ddim(trans_dims, 0, trans_dims.size() - 1)); const int64_t input_width = trans_dims[trans_dims.size() - 1]; // Allocate the temp tensor to the save the topk indices, values Tensor tmp_out; T* t_out = tmp_out.mutable_data<T>(trans_out_dims, context.GetPlace()); Tensor tmp_indices; auto* t_ind = tmp_indices.mutable_data<int64_t>(trans_out_dims, context.GetPlace()); // get the TopK value FullTopK<T, int64_t>(input_height, input_width, in_dims.size(), 
&trans_inp, t_out, t_ind, k, largest, sorted); // transpose back TransCompute<platform::CPUDeviceContext, int64_t>( ndims, dev_context, tmp_indices, indices, trans); TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out, output, trans); } } }; template <typename DeviceContext, typename T> class TopkV2GradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* x = context.Input<Tensor>("X"); auto* out_grad = context.Input<Tensor>(framework::GradVarName("Out")); auto* indices = context.Input<Tensor>("Indices"); auto* x_grad = context.Output<Tensor>(framework::GradVarName("X")); int axis = static_cast<int>(context.Attr<int>("axis")); const auto& in_dims = x->dims(); const auto& out_dims = indices->dims(); // axis < 0, get the real axis axis = (axis < 0) ? (in_dims.size() + axis) : axis; const size_t& k = out_dims[axis]; T* x_grad_data = x_grad->mutable_data<T>(context.GetPlace()); if (axis + 1 == in_dims.size()) { // allocate the memory for the input_grad // assign the out_grad to input_grad directly const int64_t input_height = framework::product( framework::slice_ddim(in_dims, 0, in_dims.size() - 1)); const int64_t input_width = in_dims[in_dims.size() - 1]; // init the output grad with 0, because some input elements has no grad memset(x_grad_data, 0, x_grad->numel() * sizeof(T)); // Assign the output_grad to input_grad FullTopKAssign(input_height, input_width, in_dims.size(), out_grad, indices, x_grad_data, k); } else { // can not assign grad to input_grad, must do the transpose std::vector<int> trans; for (int i = 0; i < axis; i++) { trans.emplace_back(i); } trans.emplace_back(out_dims.size() - 1); for (int i = axis + 1; i < out_dims.size() - 1; i++) { trans.emplace_back(i); } trans.emplace_back(axis); framework::DDim trans_dims(out_dims); framework::DDim trans_in_dims(in_dims); for (size_t i = 0; i < trans.size(); i++) { trans_dims[i] = out_dims[trans[i]]; trans_in_dims[i] = 
in_dims[trans[i]]; } // transpose the out_grad, indices Tensor trans_dO; trans_dO.mutable_data<T>(trans_dims, context.GetPlace()); Tensor trans_ind; trans_ind.mutable_data<int64_t>(trans_dims, context.GetPlace()); int ndims = trans.size(); auto& dev_context = context.template device_context<platform::CPUDeviceContext>(); // Do transpose TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, *out_grad, &trans_dO, trans); TransCompute<platform::CPUDeviceContext, int64_t>( ndims, dev_context, *indices, &trans_ind, trans); const int64_t input_height = framework::product( framework::slice_ddim(trans_in_dims, 0, trans_in_dims.size() - 1)); const int64_t input_width = trans_in_dims[trans_in_dims.size() - 1]; // Assign the out_grad to tranpose input_grad Tensor tmp_out; T* t_out = tmp_out.mutable_data<T>(trans_in_dims, context.GetPlace()); memset(t_out, 0, x_grad->numel() * sizeof(T)); FullTopKAssign<T, int64_t>(input_height, input_width, in_dims.size(), &trans_dO, &trans_ind, t_out, k); // Transpose back TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out, x_grad, trans); } } }; } // namespace operators } // namespace paddle
cvAdvDiff_kry_ompdev.c
/* ------------------------------------------------------------------- * Programmer(s): Shelby Lockhart @ LLNL * ------------------------------------------------------------------- * Acknowledgements: This example is based on cvAdvDiff_kry example * by Slaven Peles which is based on cvAdvDiff_bnd * example by Scott D. Cohen, Alan C. * Hindmarsh and Radu Serban @ LLNL * ------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2021, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ------------------------------------------------------------------- * Example problem: * * The following is a simple example problem with a banded Jacobian, * with the program for its solution by CVODE. * The problem is the semi-discrete form of the advection-diffusion * equation in 2-D: * du/dt = d^2 u / dx^2 + .5 du/dx + d^2 u / dy^2 * on the rectangle 0 <= x <= 2, 0 <= y <= 1, and the time * interval 0 <= t <= 1. Homogeneous Dirichlet boundary conditions * are posed, and the initial condition is * u(x,y,t=0) = x(2-x)y(1-y)exp(5xy). * The PDE is discretized on a uniform MX+2 by MY+2 grid with * central differencing, and with boundary values eliminated, * leaving an ODE system of size NEQ = MX*MY. * This program solves the problem with the BDF method, Newton * iteration with the CVBAND band linear solver, and a user-supplied * Jacobian routine. * It uses scalar relative and absolute tolerances. * Output is printed at t = .1, .2, ..., 1. * Run statistics (optional outputs) are printed at the end. * -----------------------------------------------------------------*/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <cvode/cvode.h> /* prototypes for CVODE fcts., consts. 
*/ #include <sunlinsol/sunlinsol_spgmr.h> /* access to SPGMR SUNLinearSolver */ #include <sundials/sundials_types.h> /* definition of type realtype */ #include <sundials/sundials_math.h> /* definition of ABS and EXP */ #include <nvector/nvector_openmpdev.h> /* OpenMPDEV N_Vector types, fcts., macros */ #ifdef _OPENMP #include <omp.h> #endif /* Real Constants */ #define ATOL RCONST(1.0e-5) /* scalar absolute tolerance */ #define T0 RCONST(0.0) /* initial time */ #define T1 RCONST(0.1) /* first output time */ #define DTOUT RCONST(0.1) /* output time increment */ #define NOUT 10 /* number of output times */ #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define TWO RCONST(2.0) #define FIVE RCONST(5.0) /* Type : _UserData (contains model and discretization parameters) */ typedef struct { sunindextype MX, MY, NEQ; realtype dx, dy, XMAX, YMAX; realtype hdcoef, hacoef, vdcoef; } *UserData; /*typedef _UserData *UserData;*/ /* Problem setup and initialization functions */ static UserData SetUserData(int argc, char** argv); static void SetIC(N_Vector u, UserData data); /* Functions Called by the Solver */ static int f(realtype t, N_Vector u, N_Vector udot, void *user_data); static int jtv(N_Vector v, N_Vector Jv, realtype t, N_Vector u, N_Vector fu, void *user_data, N_Vector tmp); /* Private Helper Functions */ static void PrintHeader(realtype reltol, realtype abstol, realtype umax, UserData data); static void PrintOutput(realtype t, realtype umax, long int nst); static void PrintFinalStats(void *cvode_mem); /* Private function to check function return values */ static int check_flag(void *flagvalue, const char *funcname, int opt); /* *------------------------------- * Main Program *------------------------------- */ int main(int argc, char** argv) { realtype reltol, abstol, t, tout, umax; N_Vector u; UserData data; SUNLinearSolver LS; void *cvode_mem; int iout, flag; long int nst; u = NULL; data = NULL; LS = NULL; cvode_mem = NULL; /* Set model 
parameters */ data = SetUserData(argc, argv); if(check_flag((void *)data, "malloc", 2)) return(1); reltol = ZERO; /* Set the tolerances */ abstol = ATOL; /* Create an OpenMPDEV vector with initial values */ u = N_VNew_OpenMPDEV(data->NEQ); /* Allocate u vector */ if(check_flag((void*)u, "N_VNew_Cuda", 0)) return(1); SetIC(u, data); /* Initialize u vector */ /* Call CVodeCreate to create the solver memory and specify the * Backward Differentiation Formula and the use of a Newton iteration */ cvode_mem = CVodeCreate(CV_BDF); if(check_flag((void *)cvode_mem, "CVodeCreate", 0)) return(1); /* Call CVodeInit to initialize the integrator memory and specify the * user's right hand side function in u'=f(t,u), the initial time T0, and * the initial dependent variable vector u. */ flag = CVodeInit(cvode_mem, f, T0, u); if(check_flag(&flag, "CVodeInit", 1)) return(1); /* Call CVodeSStolerances to specify the scalar relative tolerance * and scalar absolute tolerance */ flag = CVodeSStolerances(cvode_mem, reltol, abstol); if (check_flag(&flag, "CVodeSStolerances", 1)) return(1); /* Set the pointer to user-defined data */ flag = CVodeSetUserData(cvode_mem, data); if(check_flag(&flag, "CVodeSetUserData", 1)) return(1); /* Create SPGMR solver without preconditioning * and the maximum Krylov dimension maxl */ LS = SUNLinSol_SPGMR(u, PREC_NONE, 0); if(check_flag(&flag, "SUNLinSol_SPGMR", 1)) return(1); /* Attach the linear solver */ flag = CVodeSetLinearSolver(cvode_mem, LS, NULL); if(check_flag(&flag, "CVodeSetLinearSolver", 1)) return(1); /* Set the JAcobian-times-vector function */ flag = CVodeSetJacTimes(cvode_mem, NULL, jtv); if(check_flag(&flag, "CVodeSetJacTimesVecFn", 1)) return(1); /* In loop over output points: call CVode, print results, test for errors */ umax = N_VMaxNorm(u); PrintHeader(reltol, abstol, umax, data); for(iout=1, tout=T1; iout <= NOUT; iout++, tout += DTOUT) { flag = CVode(cvode_mem, tout, u, &t, CV_NORMAL); if(check_flag(&flag, "CVode", 1)) break; umax = 
N_VMaxNorm(u); flag = CVodeGetNumSteps(cvode_mem, &nst); check_flag(&flag, "CVodeGetNumSteps", 1); PrintOutput(t, umax, nst); } PrintFinalStats(cvode_mem); /* Print some final statistics */ N_VDestroy(u); /* Free the u vector */ CVodeFree(&cvode_mem); /* Free the integrator memory */ free(data); /* Free the user data */ return(0); } /* *------------------------------------------- * Problem setup and initialization functions *------------------------------------------- */ /* Set model and discretization parameters */ UserData SetUserData(int argc, char *argv[]) { const sunindextype MX = 10; const sunindextype MY = 5; const realtype XMAX = RCONST(2.0); /* domain boundaries */ const realtype YMAX = RCONST(1.0); /* Allocate user data structure */ UserData ud = (UserData) malloc(sizeof *ud); if(check_flag((void*) ud, "AllocUserData", 2)) return(NULL); ud->MX = MX; ud->MY = MY; ud->NEQ = MX*MY; ud->XMAX = XMAX; ud->YMAX = YMAX; ud->dx = XMAX/(MX+1); /* Set grid coefficients in data */ ud->dy = YMAX/(MY+1); ud->hdcoef = ONE/(ud->dx*ud->dx); ud->hacoef = HALF/(TWO*ud->dx); ud->vdcoef = ONE/(ud->dy*ud->dy); return ud; } /* Set initial conditions in u vector */ static void SetIC(N_Vector u, UserData data) { /* Extract needed constants from data */ const realtype dx = data->dx; const realtype dy = data->dy; const realtype xmax = data->XMAX; const realtype ymax = data->YMAX; const sunindextype MY = data->MY; const sunindextype NEQ = data->NEQ; /* Extract pointer to solution vector data on the host */ realtype *udata = N_VGetHostArrayPointer_OpenMPDEV(u); sunindextype i, j, tid; realtype x, y; /* Load initial profile into u vector */ for (tid=0; tid < NEQ; tid++) { i = tid / MY; j = tid % MY; x = (i+1)*dx; y = (j+1)*dy; udata[tid] = x*(xmax - x)*y*(ymax - y)*SUNRexp(FIVE*x*y); } N_VCopyToDevice_OpenMPDEV(u); } /* *------------------------------- * Functions called by the solver *------------------------------- */ /* f routine. Compute f(t,u). 
*/
/* Right-hand side of the semi-discrete advection-diffusion system:
 * udot = hdcoef*u_xx + hacoef*u_x + vdcoef*u_yy with homogeneous Dirichlet
 * boundary values.  The stencil is evaluated on the device via an OpenMP
 * target region over all MX*MY interior grid points. */
static int f(realtype t, N_Vector u, N_Vector udot, void *user_data)
{
  realtype uij, udn, uup, ult, urt, hdiff, hadv, vdiff;
  sunindextype i, j, k;
  int dev;
  UserData data = (UserData) user_data;

  /* Extract needed constants from data */
  const sunindextype MX = data->MX;
  const sunindextype MY = data->MY;
  const realtype hordc = data->hdcoef;  /* horizontal diffusion coefficient */
  const realtype horac = data->hacoef;  /* horizontal advection coefficient */
  const realtype verdc = data->vdcoef;  /* vertical diffusion coefficient   */

  /* Extract pointers to vector data (device-side arrays) */
  const realtype *udata = N_VGetDeviceArrayPointer_OpenMPDEV(u);
  realtype *dudata = N_VGetDeviceArrayPointer_OpenMPDEV(udot);

  /* Get device */
  dev = omp_get_default_device();

  /* Loop over all grid points. */
#pragma omp target map(to:MY,MX,hordc,horac,verdc) is_device_ptr(udata, dudata) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k=0; k<MY*MX; k++) {
    i = k/MY;  /* x index: neighbors in x are +/- MY entries away */
    j = k%MY;  /* y index: neighbors in y are adjacent entries    */

    uij = udata[k];
    /* Neighboring values; boundary neighbors are ZERO (Dirichlet BCs) */
    udn = (j == 0)    ? ZERO : udata[k - 1];
    uup = (j == MY-1) ? ZERO : udata[k + 1];
    ult = (i == 0)    ? ZERO : udata[k - MY];
    urt = (i == MX-1) ? ZERO : udata[k + MY];

    /* Set diffusion and advection terms and load into udot */
    hdiff = hordc * (ult - TWO * uij + urt);
    hadv  = horac * (urt - ult);
    vdiff = verdc * (uup - TWO * uij + udn);
    dudata[k] = hdiff + hadv + vdiff;
  }

  return(0);
}

/* Jacobian-times-vector routine.
*/
/* Computes Jv = J*v for the constant Jacobian of the discretized PDE.
 * Each entry couples a grid point to its four neighbors; boundary
 * contributions vanish (homogeneous Dirichlet).  Runs as an OpenMP
 * target region over all MX*MY points. */
static int jtv(N_Vector v, N_Vector Jv, realtype t, N_Vector u, N_Vector fu,
               void *user_data, N_Vector tmp)
{
  sunindextype i, j, k;
  int dev;
  UserData data = (UserData) user_data;

  /* Extract needed constants from data */
  const sunindextype MX = data->MX;
  const sunindextype MY = data->MY;
  const realtype hordc = data->hdcoef;
  const realtype horac = data->hacoef;
  const realtype verdc = data->vdcoef;

  /* Extract pointers to vector data (device-side arrays) */
  const realtype *vdata = N_VGetDeviceArrayPointer_OpenMPDEV(v);
  realtype *Jvdata = N_VGetDeviceArrayPointer_OpenMPDEV(Jv);

  /* Zero the output; the kernel below assigns then accumulates into it */
  N_VConst(ZERO, Jv);

  /* Get device */
  dev = omp_get_default_device();

#pragma omp target map(to:MX,MY,hordc,horac,verdc) is_device_ptr(vdata,Jvdata) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k=0; k<MX*MY; k++) {
    i = k/MY;
    j = k%MY;

    /* set the kth element of Jv: diagonal term, then the four neighbor
     * couplings when the neighbor is an interior point */
    Jvdata[k] = -TWO * (verdc + hordc) * vdata[k];
    if (i != 0)    Jvdata[k] += (hordc - horac) * vdata[k-MY];
    if (i != MX-1) Jvdata[k] += (hordc + horac) * vdata[k+MY];
    if (j != 0)    Jvdata[k] += verdc * vdata[k-1];
    if (j != MY-1) Jvdata[k] += verdc * vdata[k+1];
  }

  return(0);
}

/*
 *-------------------------------
 * Private helper functions
 *-------------------------------
 */

/* Print first lines of output (problem description) */
static void PrintHeader(realtype reltol, realtype abstol, realtype umax,
                        UserData data)
{
  printf("\n2-D Advection-Diffusion Equation\n");
  printf("Mesh dimensions = %ld X %ld\n", (long int) data->MX, (long int) data->MY);
  printf("Total system size = %ld\n", (long int) data->NEQ);
  /* Format specifiers depend on how realtype was configured at build time */
#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("Tolerance parameters: reltol = %Lg abstol = %Lg\n\n", reltol, abstol);
  printf("At t = %Lg max.norm(u) =%14.6Le \n", T0, umax);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol);
  printf("At t = %g max.norm(u) =%14.6e \n", T0, umax);
#else
  printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol);
  printf("At t = %g max.norm(u) =%14.6e \n", T0, umax);
#endif

  return;
}

/* Print current value */
static void PrintOutput(realtype t, realtype umax, long int nst)
{
#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("At t = %4.2Lf max.norm(u) =%14.6Le nst = %4ld\n", t, umax, nst);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst);
#else
  printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst);
#endif

  return;
}

/* Get and print some final statistics from the CVODE memory block:
 * workspace sizes, step/evaluation counts, and linear solver counters. */
static void PrintFinalStats(void *cvode_mem)
{
  long lenrw, leniw ;
  long lenrwLS, leniwLS;
  long int nst, nfe, nsetups, nni, ncfn, netf;
  long int nli, npe, nps, ncfl, nfeLS;
  int flag;

  flag = CVodeGetWorkSpace(cvode_mem, &lenrw, &leniw);
  check_flag(&flag, "CVodeGetWorkSpace", 1);
  flag = CVodeGetNumSteps(cvode_mem, &nst);
  check_flag(&flag, "CVodeGetNumSteps", 1);
  flag = CVodeGetNumRhsEvals(cvode_mem, &nfe);
  check_flag(&flag, "CVodeGetNumRhsEvals", 1);
  flag = CVodeGetNumLinSolvSetups(cvode_mem, &nsetups);
  check_flag(&flag, "CVodeGetNumLinSolvSetups", 1);
  flag = CVodeGetNumErrTestFails(cvode_mem, &netf);
  check_flag(&flag, "CVodeGetNumErrTestFails", 1);
  flag = CVodeGetNumNonlinSolvIters(cvode_mem, &nni);
  check_flag(&flag, "CVodeGetNumNonlinSolvIters", 1);
  flag = CVodeGetNumNonlinSolvConvFails(cvode_mem, &ncfn);
  check_flag(&flag, "CVodeGetNumNonlinSolvConvFails", 1);

  flag = CVodeGetLinWorkSpace(cvode_mem, &lenrwLS, &leniwLS);
  check_flag(&flag, "CVodeGetLinWorkSpace", 1);
  flag = CVodeGetNumLinIters(cvode_mem, &nli);
  check_flag(&flag, "CVodeGetNumLinIters", 1);
  flag = CVodeGetNumPrecEvals(cvode_mem, &npe);
  check_flag(&flag, "CVodeGetNumPrecEvals", 1);
  flag = CVodeGetNumPrecSolves(cvode_mem, &nps);
  check_flag(&flag, "CVodeGetNumPrecSolves", 1);
  flag = CVodeGetNumLinConvFails(cvode_mem, &ncfl);
  check_flag(&flag, "CVodeGetNumLinConvFails", 1);
  flag = CVodeGetNumLinRhsEvals(cvode_mem, &nfeLS);
  check_flag(&flag, "CVodeGetNumLinRhsEvals", 1);

  printf("\nFinal Statistics.. \n\n");
  printf("lenrw = %5ld leniw = %5ld\n", lenrw, leniw);
  printf("lenrwLS = %5ld leniwLS = %5ld\n", lenrwLS, leniwLS);
  printf("nst = %5ld\n" , nst);
  printf("nfe = %5ld nfeLS = %5ld\n" , nfe, nfeLS);
  printf("nni = %5ld nli = %5ld\n" , nni, nli);
  printf("nsetups = %5ld netf = %5ld\n" , nsetups, netf);
  printf("npe = %5ld nps = %5ld\n" , npe, nps);
  printf("ncfn = %5ld ncfl = %5ld\n\n", ncfn, ncfl);

  return;
}

/* Check function return value...
     opt == 0 means SUNDIALS function allocates memory so check if
              returned NULL pointer
     opt == 1 means SUNDIALS function returns a flag so check if
              flag >= 0
     opt == 2 means function allocates memory so check if returned
              NULL pointer */
static int check_flag(void *flagvalue, const char *funcname, int opt)
{
  int *errflag;

  /* Check if SUNDIALS function returned NULL pointer - no memory allocated */
  if (opt == 0 && flagvalue == NULL) {
    fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
            funcname);
    return(1); }

  /* Check if flag < 0 */
  else if (opt == 1) {
    errflag = (int *) flagvalue;
    if (*errflag < 0) {
      fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with flag = %d\n\n",
              funcname, *errflag);
      return(1); }}

  /* Check if function returned NULL pointer - no memory allocated */
  else if (opt == 2 && flagvalue == NULL) {
    fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
            funcname);
    return(1); }

  return(0);
}
GB_binop__times_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__times_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__times_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__times_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__times_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__times_uint8) // A*D function (colscale): GB (_AxD__times_uint8) // D*A function (rowscale): GB (_DxB__times_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__times_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__times_uint8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_uint8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_uint8) // C=scalar+B GB (_bind1st__times_uint8) // C=scalar+B' GB (_bind1st_tran__times_uint8) // C=A+scalar GB (_bind2nd__times_uint8) // C=A'+scalar GB (_bind2nd_tran__times_uint8) // C type: uint8_t // A type: uint8_t // A pattern? 0 // B type: uint8_t // B pattern? 
0 // BinaryOp: cij = (aij * bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x * y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_UINT8 || GxB_NO_TIMES_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__times_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__times_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__times_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__times_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__times_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) 
; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__times_uint8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__times_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint8_t alpha_scalar ; uint8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ; beta_scalar = (*((uint8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__times_uint8) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__times_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__times_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__times_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__times_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = (x * bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__times_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij * y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x * aij) ; \ } GrB_Info GB (_bind1st_tran__times_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij * y) ; \ } GrB_Info GB (_bind2nd_tran__times_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
hamcluster_1.h
// // Created by Vasiliy Ershov on 25/09/16. // #ifndef PROJECT_HAMCLUSTER_1_H #define PROJECT_HAMCLUSTER_1_H #include <common/adt/concurrent_dsu.hpp> #include <common/pipeline/config_singl.hpp> #include "HSeq.hpp" #include "kmer_data.hpp" #include "utils/logger/logger.hpp" #include "valid_hkmer_generator.hpp" namespace hammer { using HRun = HomopolymerRun; class TOneErrorClustering { private: const KMerData& data_; dsu::ConcurrentDSU clusters_; bool TryMergeClusters(const HKMer& source, const size_t source_idx, const HKMer& fixed) { auto fixed_idx = data_.checking_seq_idx(fixed); if (fixed_idx == (-1ULL)) { return false; } if (data_[fixed_idx].count > 0) { clusters_.unite(source_idx, fixed_idx); auto rSource = !source; auto rFixed = !fixed; clusters_.unite(data_.seq_idx(rSource), data_.seq_idx(rFixed)); return true; } else { return false; } } void TryCorrection(const KMerStat& source_stat, size_t source_idx) { const auto& source = source_stat.kmer; auto fixed = source; for (uint k = 0; k < K; ++k) { for (uint i = (uint)std::max(source[k].len - 1, 1); i <= (uint)(source[k].len + 1); ++i) { if (i == source[k].len) { continue; } fixed[k].len = i & 0x3F; TryMergeClusters(source, source_idx, fixed); } fixed[k].len = source[k].len; } } public: TOneErrorClustering(const KMerData& data, const uint num_threads = 16) : data_(data), clusters_(data.size()) { (void)num_threads; // stupid compiler #pragma omp parallel for num_threads(num_threads) for (size_t idx = 0; idx < data_.size(); ++idx) { if (data_[idx].count > 0) { TryCorrection(data_[idx], idx); } } } void FillClasses(std::vector<std::vector<size_t> >& clusters) { clusters_.get_sets(clusters); } }; } // namespace hammer #endif // PROJECT_HAMCLUSTER_1_H
connected_layer.c
#include "connected_layer.h"
#include "batchnorm_layer.h"
#include "convolutional_layer.h"
#include "utils.h"
#include "dark_cuda.h"
#include "blas.h"
#include "gemm.h"

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Scratch-workspace bytes needed by this layer.  With cuDNN the connected
 * layer is executed through the convolutional code path, so the
 * convolutional workspace query is reused; without cuDNN no workspace is
 * needed. */
size_t get_connected_workspace_size(layer l)
{
#ifdef CUDNN
    return get_convolutional_workspace_size(l);
    /*
    if (gpu_index >= 0) {
        size_t most = 0;
        size_t s = 0;
        CHECK_CUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn_handle(),
            l.srcTensorDesc, l.weightDesc, l.convDesc, l.dstTensorDesc,
            l.fw_algo, &s));
        if (s > most) most = s;
        CHECK_CUDNN(cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnn_handle(),
            l.srcTensorDesc, l.ddstTensorDesc, l.convDesc, l.dweightDesc,
            l.bf_algo, &s));
        if (s > most) most = s;
        CHECK_CUDNN(cudnnGetConvolutionBackwardDataWorkspaceSize(cudnn_handle(),
            l.weightDesc, l.ddstTensorDesc, l.convDesc, l.dsrcTensorDesc,
            l.bd_algo, &s));
        if (s > most) most = s;
        return most;
    }
    */
#endif
    return 0;
}

/* Builds a fully-connected layer.
 *   batch           - mini-batch size
 *   steps           - time steps (buffers are sized batch*steps)
 *   inputs, outputs - neuron counts
 *   activation      - elementwise activation applied to the output
 *   batch_normalize - nonzero to allocate and enable batch-norm state
 * The layer is modeled as a 1x1 "image" (h=w=1, c=inputs, out_c=outputs) so
 * convolutional code paths (cuDNN) can be reused. */
connected_layer make_connected_layer(int batch, int steps, int inputs, int outputs, ACTIVATION activation, int batch_normalize)
{
    int total_batch = batch*steps;
    int i;
    connected_layer l = { (LAYER_TYPE)0 };
    l.type = CONNECTED;

    l.inputs = inputs;
    l.outputs = outputs;
    l.batch= batch;
    l.batch_normalize = batch_normalize;
    l.h = 1;
    l.w = 1;
    l.c = inputs;
    l.out_h = 1;
    l.out_w = 1;
    l.out_c = outputs;
    l.n = l.out_c;
    l.size = 1;
    l.stride = 1;
    l.pad = 0;
    l.activation = activation;
    l.learning_rate_scale = 1;

    /* Forward/backward buffers and parameter/gradient storage */
    l.output = (float*)calloc(total_batch * outputs, sizeof(float));
    l.delta = (float*)calloc(total_batch * outputs, sizeof(float));

    l.weight_updates = (float*)calloc(inputs * outputs, sizeof(float));
    l.bias_updates = (float*)calloc(outputs, sizeof(float));

    l.weights = (float*)calloc(outputs * inputs, sizeof(float));
    l.biases = (float*)calloc(outputs, sizeof(float));

    l.forward = forward_connected_layer;
    l.backward = backward_connected_layer;
    l.update = update_connected_layer;

    //float scale = 1./sqrt(inputs);
    /* Uniform init in [-scale, scale] with scale = sqrt(2/inputs) */
    float scale = sqrt(2.f/inputs);
    for(i = 0; i < outputs*inputs; ++i){
        l.weights[i] = scale*rand_uniform(-1, 1);
    }

    for(i = 0; i < outputs; ++i){
        l.biases[i] = 0;
    }

    if(batch_normalize){
        l.scales = (float*)calloc(outputs, sizeof(float));
        l.scale_updates = (float*)calloc(outputs, sizeof(float));
        for(i = 0; i < outputs; ++i){
            l.scales[i] = 1;  /* gamma starts at 1 */
        }

        l.mean = (float*)calloc(outputs, sizeof(float));
        l.mean_delta = (float*)calloc(outputs, sizeof(float));
        l.variance = (float*)calloc(outputs, sizeof(float));
        l.variance_delta = (float*)calloc(outputs, sizeof(float));

        l.rolling_mean = (float*)calloc(outputs, sizeof(float));
        l.rolling_variance = (float*)calloc(outputs, sizeof(float));

        l.x = (float*)calloc(total_batch * outputs, sizeof(float));
        l.x_norm = (float*)calloc(total_batch * outputs, sizeof(float));
    }

#ifdef GPU
    l.forward_gpu = forward_connected_layer_gpu;
    l.backward_gpu = backward_connected_layer_gpu;
    l.update_gpu = update_connected_layer_gpu;

    /* Mirror all host buffers on the GPU */
    l.weights_gpu = cuda_make_array(l.weights, outputs*inputs);
    l.biases_gpu = cuda_make_array(l.biases, outputs);

    l.weight_updates_gpu = cuda_make_array(l.weight_updates, outputs*inputs);
    l.bias_updates_gpu = cuda_make_array(l.bias_updates, outputs);

    l.output_gpu = cuda_make_array(l.output, outputs*total_batch);
    l.delta_gpu = cuda_make_array(l.delta, outputs*total_batch);
    if (batch_normalize) {
        l.scales_gpu = cuda_make_array(l.scales, outputs);
        l.scale_updates_gpu = cuda_make_array(l.scale_updates, outputs);

        l.mean_gpu = cuda_make_array(l.mean, outputs);
        l.variance_gpu = cuda_make_array(l.variance, outputs);

        l.rolling_mean_gpu = cuda_make_array(l.mean, outputs);
        l.rolling_variance_gpu = cuda_make_array(l.variance, outputs);

        l.mean_delta_gpu = cuda_make_array(l.mean, outputs);
        l.variance_delta_gpu = cuda_make_array(l.variance, outputs);

        l.x_gpu = cuda_make_array(l.output, total_batch*outputs);
        l.x_norm_gpu = cuda_make_array(l.output, total_batch*outputs);
    }
#ifdef CUDNN
    create_convolutional_cudnn_tensors(&l);
    cudnn_convolutional_setup(&l, cudnn_fastest); // cudnn_fastest, cudnn_smallest
    l.workspace_size = get_connected_workspace_size(l);
#endif // CUDNN
#endif // GPU
    fprintf(stderr, "connected %4d -> %4d\n", inputs, outputs);
    return l;
}

/* SGD update with momentum and weight decay, scaled by 1/batch:
 *   params += lr/batch * grads;  grads *= momentum
 * Decay is applied to weights only (not biases or batch-norm scales). */
void update_connected_layer(connected_layer l, int batch, float learning_rate, float momentum, float decay)
{
    axpy_cpu(l.outputs, learning_rate/batch, l.bias_updates, 1, l.biases, 1);
    scal_cpu(l.outputs, momentum, l.bias_updates, 1);

    if(l.batch_normalize){
        axpy_cpu(l.outputs, learning_rate/batch, l.scale_updates, 1, l.scales, 1);
        scal_cpu(l.outputs, momentum, l.scale_updates, 1);
    }

    axpy_cpu(l.inputs*l.outputs, -decay*batch, l.weights, 1, l.weight_updates, 1);
    axpy_cpu(l.inputs*l.outputs, learning_rate/batch, l.weight_updates, 1, l.weights, 1);
    scal_cpu(l.inputs*l.outputs, momentum, l.weight_updates, 1);
}

/* CPU forward pass: output = activation(BN(input * W^T) + bias).
 * GEMM computes [batch x inputs] * [outputs x inputs]^T -> [batch x outputs].
 * In training mode batch-norm statistics are computed from this batch and
 * folded into the rolling averages (0.95/0.05 blend); at inference the
 * rolling statistics are used instead. */
void forward_connected_layer(connected_layer l, network_state state)
{
    int i;
    fill_cpu(l.outputs*l.batch, 0, l.output, 1);
    int m = l.batch;
    int k = l.inputs;
    int n = l.outputs;
    float *a = state.input;
    float *b = l.weights;
    float *c = l.output;
    gemm(0,1,m,n,k,1,a,k,b,k,1,c,n);
    if(l.batch_normalize){
        if(state.train){
            mean_cpu(l.output, l.batch, l.outputs, 1, l.mean);
            variance_cpu(l.output, l.mean, l.batch, l.outputs, 1, l.variance);

            scal_cpu(l.outputs, .95f, l.rolling_mean, 1);
            axpy_cpu(l.outputs, .05f, l.mean, 1, l.rolling_mean, 1);
            scal_cpu(l.outputs, .95f, l.rolling_variance, 1);
            axpy_cpu(l.outputs, .05f, l.variance, 1, l.rolling_variance, 1);

            /* l.x keeps pre-normalized, l.x_norm keeps normalized values
             * for the backward pass */
            copy_cpu(l.outputs*l.batch, l.output, 1, l.x, 1);
            normalize_cpu(l.output, l.mean, l.variance, l.batch, l.outputs, 1);
            copy_cpu(l.outputs*l.batch, l.output, 1, l.x_norm, 1);
        } else {
            normalize_cpu(l.output, l.rolling_mean, l.rolling_variance, l.batch, l.outputs, 1);
        }
        scale_bias(l.output, l.scales, l.batch, l.outputs, 1);
    }
    #pragma omp parallel for
    for(i = 0; i < l.batch; ++i){
        axpy_cpu(l.outputs, 1, l.biases, 1, l.output + i*l.outputs, 1);
    }
    activate_array(l.output, l.outputs*l.batch, l.activation);
}

void
/* CPU backward pass: propagates l.delta through the activation, batch norm,
 * bias and weight gradients, and (when state.delta != NULL) into the
 * previous layer's delta. */
backward_connected_layer(connected_layer l, network_state state)
{
    int i;
    gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
    for(i = 0; i < l.batch; ++i){
        axpy_cpu(l.outputs, 1, l.delta + i*l.outputs, 1, l.bias_updates, 1);
    }
    if(l.batch_normalize){
        backward_scale_cpu(l.x_norm, l.delta, l.batch, l.outputs, 1, l.scale_updates);

        scale_bias(l.delta, l.scales, l.batch, l.outputs, 1);

        mean_delta_cpu(l.delta, l.variance, l.batch, l.outputs, 1, l.mean_delta);
        variance_delta_cpu(l.x, l.delta, l.mean, l.variance, l.batch, l.outputs, 1, l.variance_delta);
        normalize_delta_cpu(l.x, l.mean, l.variance, l.mean_delta, l.variance_delta, l.batch, l.outputs, 1, l.delta);
    }

    /* weight_updates += delta^T * input */
    int m = l.outputs;
    int k = l.batch;
    int n = l.inputs;
    float *a = l.delta;
    float *b = state.input;
    float *c = l.weight_updates;
    gemm(1,0,m,n,k,1,a,m,b,n,1,c,n);

    /* state.delta += delta * weights (skip for the input layer) */
    m = l.batch;
    k = l.outputs;
    n = l.inputs;

    a = l.delta;
    b = l.weights;
    c = state.delta;

    if(c) gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
}

/* Folds the batch-norm parameters into the weights and biases so the layer
 * can run without explicit normalization, then resets the BN state. */
void denormalize_connected_layer(layer l)
{
    int i, j;
    for(i = 0; i < l.outputs; ++i){
        float scale = l.scales[i]/sqrt(l.rolling_variance[i] + .000001f);
        for(j = 0; j < l.inputs; ++j){
            l.weights[i*l.inputs + j] *= scale;
        }
        l.biases[i] -= l.rolling_mean[i] * scale;
        l.scales[i] = 1;
        l.rolling_mean[i] = 0;
        l.rolling_variance[i] = 1;
    }
}

/* Prints summary statistics of the layer's parameters (debugging aid). */
void statistics_connected_layer(layer l)
{
    if(l.batch_normalize){
        printf("Scales ");
        print_statistics(l.scales, l.outputs);
        /*
        printf("Rolling Mean ");
        print_statistics(l.rolling_mean, l.outputs);
        printf("Rolling Variance ");
        print_statistics(l.rolling_variance, l.outputs);
        */
    }
    printf("Biases ");
    print_statistics(l.biases, l.outputs);
    printf("Weights ");
    print_statistics(l.weights, l.outputs);
}

#ifdef GPU

/* Copies parameters and gradients from the GPU back to host memory. */
void pull_connected_layer(connected_layer l)
{
    cuda_pull_array(l.weights_gpu, l.weights, l.inputs*l.outputs);
    cuda_pull_array(l.biases_gpu, l.biases, l.outputs);
    cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.inputs*l.outputs);
    cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
    if (l.batch_normalize){
        cuda_pull_array(l.scales_gpu, l.scales, l.outputs);
        cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.outputs);
        cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.outputs);
    }
    CHECK_CUDA(cudaPeekAtLastError());
}

/* Copies parameters and gradients from host memory to the GPU. */
void push_connected_layer(connected_layer l)
{
    cuda_push_array(l.weights_gpu, l.weights, l.inputs*l.outputs);
    cuda_push_array(l.biases_gpu, l.biases, l.outputs);
    cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.inputs*l.outputs);
    cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
    if (l.batch_normalize){
        cuda_push_array(l.scales_gpu, l.scales, l.outputs);
        cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.outputs);
        cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.outputs);
    }
    CHECK_CUDA(cudaPeekAtLastError());
}

/* GPU analogue of update_connected_layer: SGD with momentum and decay. */
void update_connected_layer_gpu(connected_layer l, int batch, float learning_rate, float momentum, float decay)
{
    axpy_ongpu(l.outputs, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1);
    scal_ongpu(l.outputs, momentum, l.bias_updates_gpu, 1);

    if(l.batch_normalize){
        axpy_ongpu(l.outputs, learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1);
        scal_ongpu(l.outputs, momentum, l.scale_updates_gpu, 1);
    }

    axpy_ongpu(l.inputs*l.outputs, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1);
    axpy_ongpu(l.inputs*l.outputs, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
    scal_ongpu(l.inputs*l.outputs, momentum, l.weight_updates_gpu, 1);
}

/* GPU forward pass.  With cuDNN the layer runs as a convolution via
 * cudnnConvolutionForward; otherwise a plain GEMM is used.  Batch norm (when
 * enabled) also adds the bias, so the explicit add_bias_gpu is only in the
 * non-BN path. */
void forward_connected_layer_gpu(connected_layer l, network_state state)
{
    fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);

    int m = l.batch;
    int k = l.inputs;
    int n = l.outputs;
    float * a = state.input;
    float * b = l.weights_gpu;
    float * c = l.output_gpu;
#ifdef CUDNN
    float one = 1;    // alpha[0], beta[0]
    float alpha = 1, beta = 0;

    CHECK_CUDNN(cudnnConvolutionForward(cudnn_handle(),
        &alpha, //&one,
        l.srcTensorDesc,
        state.input,
        l.weightDesc,
        l.weights_gpu,
        l.convDesc,
        l.fw_algo,
        state.workspace,
        l.workspace_size,
        &beta,  //&one,
        l.dstTensorDesc,
        l.output_gpu));
#else // CUDNN
    gemm_ongpu(0,1,m,n,k,1,a,k,b,k,1,c,n);
#endif // CUDNN

    if (l.batch_normalize) {
        forward_batchnorm_layer_gpu(l, state);
    }
    else {
        add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.outputs, 1);
    }
    //for(i = 0; i < l.batch; ++i) axpy_ongpu(l.outputs, 1, l.biases_gpu, 1, l.output_gpu + i*l.outputs, 1);
    activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
}

/* GPU backward pass.  Gradients are clipped to [-1, 1] (constrain_ongpu)
 * before the activation gradient is applied.  The cuDNN backward path is
 * compiled out (CUDNN_DISABLED); the live path uses two GEMMs as in the CPU
 * version. */
void backward_connected_layer_gpu(connected_layer l, network_state state)
{
    int i;
    constrain_ongpu(l.outputs*l.batch, 1, l.delta_gpu, 1);
    gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
    for(i = 0; i < l.batch; ++i){
        axpy_ongpu(l.outputs, 1, l.delta_gpu + i*l.outputs, 1, l.bias_updates_gpu, 1);
    }

    if(l.batch_normalize){
        backward_batchnorm_layer_gpu(l, state);
    }

#ifdef CUDNN_DISABLED
    float one = 1;
    // calculate conv weight updates
    // if used: beta=1 then loss decreases faster
    CHECK_CUDNN(cudnnConvolutionBackwardFilter(cudnn_handle(),
        &one,
        l.srcTensorDesc,
        state.input,
        l.ddstTensorDesc,
        l.delta_gpu,
        l.convDesc,
        l.bf_algo,
        state.workspace,
        l.workspace_size,
        &one,
        l.dweightDesc,
        l.weight_updates_gpu));

    if (state.delta) {
        // http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionBackwardData
        // calculate delta for the next layer
        CHECK_CUDNN(cudnnConvolutionBackwardData(cudnn_handle(),
            &one,
            l.weightDesc,
            l.weights_gpu,
            l.ddstTensorDesc,
            l.delta_gpu,
            l.convDesc,
            l.bd_algo,
            state.workspace,
            l.workspace_size,
            &one,
            l.dsrcTensorDesc,
            state.delta));
    }
#else // CUDNN
    /* weight_updates += delta^T * input */
    int m = l.outputs;
    int k = l.batch;
    int n = l.inputs;
    float * a = l.delta_gpu;
    float * b = state.input;
    float * c = l.weight_updates_gpu;
    gemm_ongpu(1,0,m,n,k,1,a,m,b,n,1,c,n);

    /* state.delta += delta * weights (skip for the input layer) */
    m = l.batch;
    k = l.outputs;
    n = l.inputs;

    a = l.delta_gpu;
    b = l.weights_gpu;
    c = state.delta;

    if(c) gemm_ongpu(0,0,m,n,k,1,a,k,b,n,1,c,n);
#endif // CUDNN
}
#endif
ten_tusscher_3_Fig4b.c
#include "model_common.h"
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_3_Fig4b.h"

// Cell type selector for this build: endocardial parameters.
#define ENDO

// Expose the model's resting potential and number of ODE state variables.
GET_CELL_MODEL_DATA(init_cell_model_data) {

    assert(cell_model);

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

// Fill sv[0..NEQ-1] with the model's initial conditions. The commented-out
// alternatives below are pre-computed steady states kept for reference.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    // Original values from Sachetto
    /*
    sv[0] = INITIAL_V; // V; millivolt
    sv[1] = 0.005619;; //M
    sv[2] = 0.551265; //H
    sv[3] = 0.246963; //J
    sv[4] = 0.060715; //Xr1
    sv[5] = 0.018069; //Xs
    sv[6] = 0.201403; //S
    sv[7] = 0.842582; //F
    sv[8] = 0.997207; //F2
    sv[9] = 0.000072; //D_INF
    sv[10] = 0.0; //R_INF
    sv[11] = 0.412887; //Xr2_INF}
    */

    // Steady-State Healthy cell after 12000ms with INaCa = 60%
    /*
    sv[0] = -86.3071; sv[1] = 0.00136401; sv[2] = 0.77323; sv[3] = 0.768732;
    sv[4] = 0.000364856; sv[5] = 0.00427237; sv[6] = 0.447765; sv[7] = 0.889209;
    sv[8] = 0.999561; sv[9] = 2.92107e-05; sv[10] = 2.01946e-08; sv[11] = 0.482373;
    */

    // Steady-State Healthy cell after 12000ms with INaCa = 100%
    /*
    sv[0] = -86.0262; sv[1] = 0.00144818; sv[2] = 0.766091; sv[3] = 0.761181;
    sv[4] = 0.000413456; sv[5] = 0.00430467; sv[6] = 0.446116; sv[7] = 0.890307;
    sv[8] = 0.999543; sv[9] = 3.03253e-05; sv[10] = 2.11624e-08; sv[11] = 0.479452;
    */

    // Steady-State Full fibrotic cell after 12000ms with INaCa = 60%
    /*
    sv[0] = -79.5526; sv[1] = 0.0056175; sv[2] = 0.556426; sv[3] = 0.544609;
    sv[4] = 0.00112076; sv[5] = 0.00489477; sv[6] = 0.503451; sv[7] = 0.985002;
    sv[8] = 0.998849; sv[9] = 7.18864e-05; sv[10] = 6.22501e-08; sv[11] = 0.412904;
    */

    // Steady-State Full fibrotic cell after 12000ms with INaCa = 100%
    /*
    sv[0] = -79.3449; sv[1] = 0.00586152; sv[2] = 0.548496; sv[3] = 0.535665;
    sv[4] = 0.00120839; sv[5] = 0.00496648; sv[6] = 0.502701; sv[7] = 0.985158;
    sv[8] = 0.998814; sv[9] = 7.39051e-05; sv[10] = 6.4443e-08; sv[11] = 0.410807;
    */

    // Steady-State Sachetto
    sv[0] = -79.342369;
    sv[1] = 0.005865;
    sv[2] = 0.547718;
    sv[3] = 0.311839;
    sv[4] = 0.051881;
    sv[5] = 0.015125;
    sv[6] = 0.209739;
    sv[7] = 0.829223;
    sv[8] = 0.998682;
    sv[9] = 0.000074;
    sv[10] = 0.000000;
    sv[11] = 0.410782;
}

// Advance every requested cell by num_steps explicit steps of size dt.
// extra_data layout (when present): [atpi, Ko, Ki_mult, K1_mult, acidosis,
// fibrosis[0..num_cells-1]]. Without extra_data, fibrosis defaults to all
// zeros via calloc.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    uint32_t sv_id;

    real *fibrosis;

    // Default values for a healthy cell ///////////
    real atpi = 6.8f;
    real Ko = 5.4f;
    real Ki_mult = 1.0f;
    real acidosis = 0.0;
    real K1_mult = 1.0f;
    ////////////////////////////////////

    if(extra_data) {
        atpi = ((real*)extra_data)[0]; //value
        Ko = ((real*)extra_data)[1]; //value
        Ki_mult = ((real*)extra_data)[2]; //value
        K1_mult = ((real*)extra_data)[3]; //value
        acidosis = ((real*)extra_data)[4]; //value
        fibrosis = ((real*)extra_data) + 5; //pointer
    }
    else {
        // NOTE(review): calloc result is not checked; a failed allocation
        // would dereference NULL below.
        fibrosis = calloc(num_cells_to_solve, sizeof(real));
    }

    int i;

    // Each iteration touches a disjoint sv slice, so the loop is trivially parallel.
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i], fibrosis[i], atpi, Ko, Ki_mult, K1_mult, acidosis);
        }
    }

    if(extra_data == NULL) free(fibrosis);
}

// One time step for a single cell: forward Euler for the voltage, Rush-Larsen
// style steady-state updates (computed inside RHS_cpu) for the gates.
void solve_model_ode_cpu(real dt, real *sv, real stim_current, real fibrosis, real atpi, real Ko, real Ki_mult, real K1_mult, real acidosis)  {

    assert(sv);

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current, dt, fibrosis, atpi, Ko, Ki_mult, K1_mult, acidosis);

    //THIS MODEL USES THE Rush Larsen Method TO SOLVE THE EDOS
    sv[0] = dt*rDY[0] + rY[0];   // explicit Euler on the membrane voltage
    sv[1] = rDY[1];              // gates: RHS already returns the updated value
    sv[2] = rDY[2];
    sv[3] = rDY[3];
    sv[4] = rDY[4];
    sv[5] = rDY[5];
    sv[6] = rDY[6];
    sv[7] = rDY[7];
    sv[8] = rDY[8];
    sv[9] = rDY[9];
    sv[10] = rDY[10];
    sv[11] = rDY[11];
}

// Right-hand side of the reduced ten Tusscher model. rDY_[0] receives -Itot
// (dV/dt); rDY_[1..11] receive the already-integrated gate values (Rush-Larsen).
// NOTE(review): Ki_multiplicator, K1_multiplicator and acidosis are accepted
// but never used in this variant; svolt_acid is computed but the -3.4 mV
// acidosis shift is instead hard-coded into INa/ICaL below — confirm intent.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt, real fibrosis, real atpi, real Ko, real Ki_multiplicator, real K1_multiplicator, real acidosis) {

    // State variables
    const real svolt = sv[0];      // Membrane variable

    real svolt_acid = svolt;

    if( fibrosis == 0.0f ) {
        //These values are from In Electrophysiologic effects of acute myocardial ischemia: a theoretical
        //study of altered cell excitability and action potential duration
        svolt_acid = svolt - 3.4f;
    }

    const real sm   = sv[1];
    const real sh   = sv[2];
    const real sj   = sv[3];
    const real sxr1 = sv[4];
    const real sxs  = sv[5];
    const real ss   = sv[6];
    const real sf   = sv[7];
    const real sf2  = sv[8];
    const real D_INF   = sv[9];
    const real R_INF   = sv[10];
    const real Xr2_INF = sv[11];

    // BUGGY
    //const real Xr2_INF = sv[10];
    //const real R_INF = sv[11];

    const real natp = 0.24;            // K dependence of ATP-sensitive K current
    const real nicholsarea = 0.00005;  // Nichol's areas (cm^2)
    const real hatp = 2;               // Hill coefficient

    //Extracellular potassium concentration was elevated
    //from its default value of 5.4 mM to values between 6.0 and 8.0 mM
    //Ref: A Comparison of Two Models of Human Ventricular Tissue: Simulated Ischemia and Re-entry
    real Ko_change = 5.4f - Ko;
    Ko = Ko + Ko_change*fibrosis;

    //Linear changing of atpi depending on the fibrosis and distance from the center of the scar (only for border zone cells)
    real atpi_change = 6.8f - atpi;
    atpi = atpi + atpi_change*fibrosis;

    //real katp = 0.306;
    //Ref: A Comparison of Two Models of Human Ventricular Tissue: Simulated Ischaemia and Re-entry
    const real katp = -0.0942857142857*atpi + 0.683142857143;
    //Ref: A Comparison of Two Models of Human Ventricular Tissue: Simulated Ischaemia and Re-entry

    const real patp =  1/(1 + pow((atpi/katp),hatp));
    const real gkatp    =  0.000195/nicholsarea;
    const real gkbaratp =  gkatp*patp*pow((Ko/4),natp);

    const real katp2= 1.4;
    const real hatp2 = 2.6;
    const real pcal = 1.0/(1.0 + pow((katp2/atpi),hatp2));

    const real Cao=2.0;
    const real Nao=140.0;
    const real Cai=0.00007;
    const real Nai=7.67;
    const real Ki=138.3;

    //Constants
    const real R=8314.472;
    const real F=96485.3415;
    const real T=310.0;
    const real RTONF=(R*T)/F;

    //Parameters for currents
    //Parameters for IKr
    const real Gkr=0.101;
    //Parameters for Iks
    const real pKNa=0.03;
#ifdef EPI
    const real Gks=0.257;
#endif
#ifdef ENDO
    const real Gks=0.392;
#endif
#ifdef MCELL
    const real Gks=0.098;
#endif
    //Parameters for Ik1
    const real GK1=5.405;
    //Parameters for Ito
#ifdef EPI
    const real Gto=0.294;
#endif
#ifdef ENDO
    const real Gto=0.073;
#endif
#ifdef MCELL
    const real Gto=0.294;
#endif
    //Parameters for INa
    const real GNa=14.838*0.5; //ACIDOSIS
    //Parameters for IbNa
    const real GbNa=0.00029;
    //Parameters for INaK
    const real KmK=1.0;
    const real KmNa=40.0;
    const real knak=2.724;
    //Parameters for ICaL
    const real GCaL=0.2786*pcal*0.5; //ACIDOSIS
    //Parameters for IbCa
    const real GbCa=0.000592;
    //Parameters for INaCa
    const real knaca=1000;
    const real KmNai=87.5;
    const real KmCa=1.38;
    const real ksat=0.1;
    const real n=0.35;
    //Parameters for IpCa
    const real GpCa=0.1238;
    const real KpCa=0.0005;
    //Parameters for IpK;
    const real GpK=0.0293;

    // Nernst/reversal potentials
    const real Ek=RTONF*(log((Ko/Ki)));
    const real Ena=RTONF*(log((Nao/Nai)));
    const real Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    const real Eca=0.5*RTONF*(log((Cao/Cai)));

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real IKatp;

    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;

    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real Xr1_INF;
    real Xr2_INF_new;
    real TAU_Xr1;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF_new;
    real S_INF;
    real TAU_S;
    real Af;
    real Bf;
    real Cf;
    real Af2;
    real Bf2;
    real Cf2;
    real D_INF_new;
    real TAU_F;
    real F_INF;
    real TAU_F2;
    real F2_INF;
    real sItot;

    //Needed to compute currents
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*((svolt-3.4)-Ena); //ACIDOSIS
    ICaL=GCaL*D_INF*sf*sf2*((svolt-3.4)-60); //ACIDOSIS
    Ito=Gto*R_INF*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*Xr2_INF*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);

    real theta = 1.0;
    INaCa = INaCa*theta; //ACIDOSIS

    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);
    IKatp = gkbaratp*(svolt-Ek);

    //Determine total current
    (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + IKatp + stim_current;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
              exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF_new=1./(1.+exp((svolt-(-88.))/24.));

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=(1400./(sqrt(1.+exp((5.-svolt)/6))));
    Bxs=(1./(1.+exp((svolt-35.)/15.)));
    TAU_Xs=Axs*Bxs+80;

#ifdef EPI
    R_INF_new=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF_new=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF_new=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif

    D_INF_new=1./(1.+exp((-8-svolt)/7.5));
    F_INF=1./(1.+exp((svolt+20)/7));
    Af=1102.5*exp(-(svolt+27)*(svolt+27)/225);
    Bf=200./(1+exp((13-svolt)/10.));
    Cf=(180./(1+exp((svolt+30)/10)))+20;
    TAU_F=Af+Bf+Cf;
    F2_INF=0.67/(1.+exp((svolt+35)/7))+0.33;
    Af2=600*exp(-(svolt+27)*(svolt+27)/170);
    Bf2=7.75/(1.+exp((25-svolt)/10));
    Cf2=16/(1.+exp((svolt+30)/10));
    TAU_F2=Af2+Bf2+Cf2;

    //update voltage
    rDY_[0] = -sItot;

    //Update gates (Rush-Larsen exponential integration per gate)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[6]= S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[7] =F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    rDY_[8] =F2_INF-(F2_INF-sf2)*exp(-dt/TAU_F2);
    rDY_[9] = D_INF_new;
    rDY_[10] = R_INF_new;
    rDY_[11] = Xr2_INF_new;
}
algorithm.h
#ifndef RCLUSTERP_ALGORITHM_H
#define RCLUSTERP_ALGORITHM_H

#include <limits>
#include <utility>
#include <algorithm>
#include <functional>
#include <stack>

#include <Rclusterpp/cluster.h>
#include <Rclusterpp/util.h>

namespace Rclusterpp {

    // Find the element in [first, last) closest to an implicit query point.
    // `distancer(x, bound)` returns the distance from the query to x and may
    // use `bound` to short-circuit. Returns (iterator, distance); iterator is
    // `last` when nothing beats max_dist. Parallelized as a manual OpenMP
    // min-reduction: each thread keeps a thread-local best and the results
    // are merged inside the critical section.
    template<class RandomIterator, class Distancer>
    std::pair<RandomIterator, typename Distancer::result_type> nearest_neighbor(
        const RandomIterator& first,
        const RandomIterator& last,
        Distancer             distancer,
        typename Distancer::result_type max_dist=std::numeric_limits<typename Distancer::result_type>::max()
    ) {
        typedef typename Distancer::result_type Dist_t;

        RandomIterator min_i = last;
        Dist_t         min_d = max_dist;

#ifdef _OPENMP
#pragma omp parallel shared(min_i, min_d, distancer)
#endif
        {
            // min_i_l is only read when min_d_l improved, i.e. after it was assigned
            RandomIterator min_i_l;
            Dist_t         min_d_l = min_d;

#ifdef _OPENMP
#pragma omp for nowait
#endif
            for (ssize_t i=0; i<(last-first); i++) {
                Dist_t dist = distancer(*(first+i), min_d_l);
                if (dist < min_d_l) {
                    min_i_l = first + i;
                    min_d_l = dist;
                }
            }

#ifdef _OPENMP
#pragma omp critical
#endif
            {
                if (min_d_l < min_d) {
                    min_i = min_i_l;
                    min_d = min_d_l;
                }
            }
        }

        return std::make_pair(min_i, min_d);
    }

    // Agglomerative clustering via reciprocal-nearest-neighbor (RNN) chains.
    // Grows a chain of successive nearest neighbors until it folds back on
    // itself; the tip pair is then merged into a new cluster appended to the
    // vector. On completion `clusters` holds the initial clusters followed by
    // the merges, re-ordered and re-numbered to R's hclust convention.
    template<class ClusteringMethod, class ClusterVector>
    void cluster_via_rnn(ClusteringMethod method, ClusterVector& clusters) {

        typedef ClusterVector clusters_type;
        typedef typename clusters_type::cluster_type cluster_type;
        typedef typename ClusteringMethod::distance_type distance_type;

        // Result from nearest neighbor scan
        typedef std::pair<typename clusters_type::iterator, distance_type> nearn_type;
#define nn_cluster(x) (x).first
#define distance_to_nn(x) (x).second

        // Nearest neighbor chain
        typedef std::pair<typename clusters_type::value_type, distance_type> entry_type;
        std::stack<entry_type> chain;
#define cluster_at_tip(x) (x).top().first
#define distance_to_tip(x) (x).top().second

        // Expand the size of clusters vector to the contain exactly the newly created clusters
        size_t initial_clusters = clusters.size(), result_clusters = (initial_clusters * 2) - 1;
        clusters.reserve(result_clusters);

        // List of valid clusters (used in merge)
        Util::IndexList valid(initial_clusters);

        typename clusters_type::iterator next_unchained = clusters.begin();

        while (clusters.size() != result_clusters) {
            if (chain.empty()) {
                // Pick next "unchained" cluster as default
                chain.push( entry_type(*next_unchained, std::numeric_limits<distance_type>::max()) );
                ++next_unchained;
            } else {
                // Find next nearest neighbor from remaining "unchained" clusters
                nearn_type nn = nearest_neighbor(
                    next_unchained,
                    clusters.end(),
                    Util::cluster_bind(method.distancer, cluster_at_tip(chain)),  // Bind tip into distance function for computing nearest neighbor
                    distance_to_tip(chain)
                );
                if (nn.first != clusters.end()) {
                    // Chain extends: move the new neighbor to the chained region.
                    std::iter_swap(next_unchained, nn_cluster(nn));
                    chain.push( entry_type(*next_unchained, distance_to_nn(nn)) );
                    ++next_unchained;
                } else {
                    // Tip of chain is recursive nearest neighbor
                    cluster_type* r = cluster_at_tip(chain);
                    distance_type d = distance_to_tip(chain);
                    chain.pop();
                    cluster_type* l = cluster_at_tip(chain);
                    chain.pop();

                    // Remove "tip" and "next tip" from chain and merge into new cluster appended to "unchained" clusters
                    cluster_type* cn = ClusterVector::make_cluster(std::min(l->idx(), r->idx()), l, r, d);
                    valid.remove(std::max(r->idx(), l->idx()));
                    method.merger(*cn, *(cn->parent1()), *(cn->parent2()), valid);
                    clusters.push_back(cn);
                }
            }
        }

        // Cleanup cluster listing

        // Re-order the clusters, with initial clusters in the beginning, ordered by id
        // from -1 .. -initial_clusters, followed by the agglomerated clusters sorted by
        // increasing disimilarity. Stable partition and stable sorting is required for
        // the latter to ensure merge order is maintaining for clusters with identical
        // dissimilarity.

        // Note, sort requires strict weak ordering and will fail in a data dependent way
        // if the comparison function does not satisfy that requirement
        // NOTE(review): std::mem_fun and std::ptr_fun are deprecated in C++11
        // and removed in C++17; migrate to std::mem_fn / plain callables when
        // the toolchain baseline allows.
        typename clusters_type::iterator part = std::stable_partition(clusters.begin(), clusters.end(), std::mem_fun(&cluster_type::initial));
        std::sort(clusters.begin(), part, &compare_id<cluster_type>);
        std::stable_sort(part, clusters.end(), std::ptr_fun(&compare_disimilarity<cluster_type>));

        for (size_t i=initial_clusters; i<result_clusters; i++) {
            clusters[i]->set_id(i - initial_clusters + 1);  // Use R hclust 1-indexed convention for Id's
        }
    }

    // NOTE(review): anonymous namespace in a header gives each TU its own
    // copy of these helpers; harmless here but unconventional.
    namespace {

        // A merge is (from, into): `from` is absorbed into `into`.
        typedef std::pair<size_t, size_t> Merge_t;

        inline Merge_t make_merge(size_t from, size_t into) { return std::make_pair(from, into); }
        inline size_t from(const Merge_t& m) { return m.first; }
        inline size_t into(const Merge_t& m) { return m.second; }

        // Order merges by the height (dissimilarity) of their `from` cluster.
        template<class Distance>
        struct MergeCMP {
            const Distance& height;
            MergeCMP(const Distance& height_) : height(height_) {}
            bool operator()(const Merge_t& a, const Merge_t& b) const {
                return height[from(a)] < height[from(b)];
            }
        };

    } // end of anonymous namespace

    // Single-linkage clustering via the SLINK algorithm (Sibson 1973):
    // O(n^2) time, O(n) extra space, using the pointer representation
    // (P = pointer, L = height, M = scratch distances), then converted to a
    // dendrogram compatible with R's hclust.
    template<class Distancer, class ClusterVector>
    void cluster_via_slink(const Distancer& distancer, ClusterVector& clusters) {
        typedef typename Distancer::result_type distance_type;

        size_t initial_clusters = clusters.size(), result_clusters = (initial_clusters * 2) - 1;
        clusters.reserve(result_clusters);

        std::vector<size_t>        P = std::vector<size_t>(initial_clusters);
        std::vector<distance_type> L = std::vector<distance_type>(initial_clusters);
        std::vector<distance_type> M = std::vector<distance_type>(initial_clusters);

        for (size_t i=0; i<initial_clusters; i++) {
            // Step 1: Initialize
            P[i] = i;
            L[i] = std::numeric_limits<distance_type>::max();

            // Step 2: Build out pairwise distances from objects in pointer
            // represenation to the new object
#ifdef _OPENMP
#pragma omp parallel for shared(i, M)
#endif
            for (ssize_t j=0; j<(ssize_t)i; j++) {
                M[j] = distancer(i, j);
            }

            // Step 3: Update M, P, L
            for (size_t j=0; j<i; j++) {
                distance_type l = L[j], m = M[j];
                if (l >= m) {
                    M[P[j]] = std::min(M[P[j]], l);
                    L[j]    = m;
                    P[j]    = i;
                } else {
                    M[P[j]] = std::min(M[P[j]], m);
                }
            }

            // Step 4: Actualize the clusters
            for (size_t j=0; j<i; j++) {
                if (L[j] >= L[P[j]])
                    P[j] = i;
            }
        }

        // Convert the pointer representation to dendogram
        std::vector<Merge_t> merges = std::vector<Merge_t>(initial_clusters-1);
        for (size_t i=0; i<(initial_clusters-1); i++) {
            merges[i] = make_merge(i, P[i]);  // from, into
        }
        std::sort(merges.begin(), merges.end(), MergeCMP<std::vector<distance_type> >(L));

        for (size_t i=0; i<initial_clusters; i++) {
            P[i] = i;
        }
        for (size_t i=0; i<(initial_clusters-1); i++) {
            size_t f = from(merges[i]), t = into(merges[i]);
            clusters.push_back(ClusterVector::make_cluster(
                0,
                clusters[P[f]],
                clusters[P[t]],
                L[f]
            ));
            // Redirect `into` to the freshly created agglomerated cluster.
            P[t] = i + initial_clusters;
        }

        for (size_t i=initial_clusters; i<result_clusters; i++) {
            clusters[i]->set_id(i - initial_clusters + 1);  // Use R hclust 1-indexed convention for Id's
        }
    }

} // end of Rclustercpp namespace

#endif
recolorCube.c
/* * Copyright 2014 NeuroData (http://neurodata.io) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * Recolor Slice Function * Naive implementation */ #include<stdint.h> #include<omp.h> #include<ndlib.h> /*OpenMP implementation for 32-bit annotations*/ void recolorCubeOMP32 ( uint32_t * cutout, int xdim, int ydim, uint32_t * imagemap, uint32_t * rgbColor) { int i,j; #pragma omp parallel num_threads( omp_get_max_threads() ) { #pragma omp for private(i,j) schedule(dynamic) for ( i=0; i<xdim; i++) for ( j=0; j<ydim; j++) if ( cutout [(i*ydim)+j] != 0 ) imagemap [(i*ydim)+j] = rgbColor[ cutout [(i*ydim)+j] % 217 ]; } } /*OpenMP implementation for 64-bit annotations*/ void recolorCubeOMP64 ( uint64_t * cutout, int xdim, int ydim, uint64_t * imagemap, uint64_t * rgbColor) { int i,j; #pragma omp parallel num_threads( omp_get_max_threads() ) { #pragma omp for private(i,j) schedule(dynamic) for ( i=0; i<xdim; i++) for ( j=0; j<ydim; j++) if ( cutout [(i*ydim)+j] != 0 ) imagemap [(i*ydim)+j] = rgbColor[ cutout [(i*ydim)+j] % 217 ]; } }
example.c
// PWD008: Unprotected multithreading recurrence due to // out-of-dimension-bounds array access // https://www.appentra.com/knowledge/checks/pwd008 void foo() { int A[5][5]; #pragma omp parallel for for (int i = 1; i < 5; ++i) { for (int j = 0; j < 5; ++j) { A[i][j] += A[i][j-1]; } } }
GB_unop__sin_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__sin_fc32_fc32
// op(A') function:  GB_unop_tran__sin_fc32_fc32

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = csinf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = csinf (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC32_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ;            \
    Cx [pC] = csinf (z) ;           \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SIN || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise complex sine over the anz entries of Ax, parallelized with a
// static OpenMP schedule (each entry is independent). Cx and Ax may alias.
GrB_Info GB_unop_apply__sin_fc32_fc32
(
    GxB_FC32_t *Cx,       // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC32_t aij = Ax [p] ;
        GxB_FC32_t z = aij ;
        Cx [p] = csinf (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel is generated by GB_unop_transpose.c using the macros
// defined above (GB_CAST_OP et al.).
GrB_Info GB_unop_tran__sin_fc32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ep_seq.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <pthread.h> #include <math.h> #include <omp.h> #include "util.h" #define TRUE 1 #define FALSE 0 #define DEBUG 1 #define NMAX 1000 #define MAX_LINE 256 /*Intervalo [0, 255]*/ #define RGB_SIZE 256 #define PI 3.14159265359 long int x; int main(int argc, char **argv) { FILE *arq1, *arq2; char *infile, *outfile; char a[MAX_LINE]; int nr_inter, nr_proc, nr_threads; int i, j, k, cont, columns, lines, comp_max_val; float val, distribute; float gx, gy, g; Pixel **M, **M2, **aux; /*Matriz de pixels*/ i = j = k = 0; /*Usemode*/ if (argc < 5) { printf("Modo de usar:\n\tArg1: nome do arquivo de entrada;\n\tArg2: nome do arquivo de saída\n\t"); printf("Arg3: número de iterações;\n\tArg4: número de processadores.\n\t"); exit(1); } infile = argv[1]; outfile = argv[2]; nr_inter = atoi(argv[3]); nr_proc = atoi(argv[4]); /*nr_threads = 2 * nr_proc;*/ /*Considerando hyperthread*/ /*id = malloc(nr_threads * sizeof(int));*/ arq1 = fopen(infile, "r"); if (arq1 == NULL) printf("Erro, não foi possível abrir o arquivo\n"); else { /*Read the input file*/ if (DEBUG) printf("Consegui abrir o arquivo!\n"); cont = 0; while ((a[0] = fgetc(arq1)) != EOF) { if (a[0] == '#' || a[0] == 'P') { fgets(a, MAX_LINE, arq1); if (DEBUG) printf("Ignorando comentários...\n"); } else if (cont == 0) { ungetc(a[0], arq1); fscanf(arq1,"%d %d\n", &columns, &lines); fscanf(arq1,"%d\n", &comp_max_val); cont++; if (DEBUG) { printf("Num_linhas = %d, num_colunas = %d\n", lines, columns); printf("comp_max_val = %d\n", comp_max_val); } /*Alocação das matrizes*/ M = (Pixel **) malloc(lines * sizeof(Pixel*)); M2 = (Pixel **) malloc(lines * sizeof(Pixel*)); for (i = 0; i < lines; i++) { M[i] = (Pixel *) malloc(columns * sizeof(Pixel)); M2[i] = (Pixel *) malloc(columns * sizeof(Pixel)); } } else { ungetc(a[0], arq1); for (i = 0; i < lines; i++) { for (j = 0; j < columns; j++) { fscanf(arq1, "%f %f %f", &M[i][j].R, &M[i][j].G, &M[i][j].B); M[i][j].R /= 
RGB_SIZE; M[i][j].G = (2*PI * M[i][j].G) / RGB_SIZE; M[i][j].B /= RGB_SIZE; /* Calcular Rx, Ry, Bx e By quando ler a entrada \/*/ M[i][j].Rx = horizontal_component(M[i][j].R, M[i][j].G); M[i][j].Bx = horizontal_component(M[i][j].B, M[i][j].G); M[i][j].Ry = vertical_component(M[i][j].R, M[i][j].G); M[i][j].By = vertical_component(M[i][j].B, M[i][j].G); } } break; } } } fclose(arq1); if (DEBUG) printf("Arquivo lido!\n"); /*IMPORTANTE: As bordas nunca se alteram.*/ for (k = 0; k < nr_inter; k++) { aux = M; M = M2; M2 = aux; for (i = 1; i < lines - 1; i++) { for (j = 1; j < columns - 1; j++) { if (M[i][j].Rx > 0) { if (j != columns -1) { val = transfer(M[i][j+1].R, M[i][j].Rx); M2[i][j+1].Rx += val; M2[i][j].Rx -= val; } if (j != 1) { val = transfer(M[i][j-1].B, M[i][j].Bx); M2[i][j-1].Bx += val; /*Recebe no sentido oposto*/ M2[i][j].Bx -= val; } } else { /*Recebe um valor positivo*/ if (j != 1) { val = transfer(M[i][j-1].R, M[i][j].Rx); M2[i][j-1].Rx -= val; M2[i][j].Rx += val; } if (j != columns - 1) { val = transfer(M[i][j+1].B, M[i][j].Bx); M2[i][j+1].Bx -= val; /*Recebe no sentido oposto*/ M2[i][j].Bx += val; } } if (M[i][j].Ry > 0) { if (i != 1) { val = transfer(M[i-1][j].R, M[i][j].Ry); M2[i-1][j].Ry += val; M2[i][j].Ry -= val; } if (i != lines - 1) { val = transfer(M[i+1][j].B, M[i][j].By); M2[i+1][j].By += val; M2[i][j].By -= val; } } else { /*Recebe um valor positivo*/ if (i != lines - 1) { val = transfer(M[i+1][j].R, M[i][j].Ry); M2[i+1][j].Ry -= val; M2[i][j].Ry += val; } if (i != 1) { val = transfer(M[i-1][j].B, M[i][j].By); M2[i-1][j].By -= val; M2[i][j].By += val; } } } } /*O bloco abaixo checa se os pixels vizinhos estouraram*/ for (i = 1; i < lines - 1; i++) { for (j = 1; j < columns - 1; j++) { /*Paralelizar as checagens do R e B se tiver pelo menos oito threads, podendo deixar os 4 if's internos em paralelo*/ /*Checa o R*/ if (M2[i][j].R > 1) { distribute = (M2[i][j].R - 1) / 4; M2[i][j].R = 1; /*Dá para parelelizar os if's abaixo*/ /*Os if's checam 
se os vizinhos não estão na borda e não serão estourados*/ if (i-1 > 0 && M2[i-1][j].R + distribute < 1) M2[i-1][j].R += distribute; if (i+1 < lines && M2[i+1][j].R + distribute < 1) M2[i+1][j].R += distribute; if (j-1 > 0 && M2[i][j-1].R + distribute < 1) M2[i][j-1].R += distribute; if (j+1 < columns && M2[i][j+1].R + distribute < 1) M2[i][j+1].R += distribute; } /*Checa o B*/ if (M2[i][j].B > 1) { distribute = (M2[i][j].B - 1) / 4; M2[i][j].B = 1; /*Os if's checam se os vizinhos não estão na borda e não serão estourados*/ if (i-1 > 0 && M2[i-1][j].B + distribute < 1) M2[i-1][j].B += distribute; if (i+1 < lines && M2[i+1][j].B + distribute < 1) M2[i+1][j].B += distribute; if (j-1 > 0 && M2[i][j-1].B + distribute < 1) M2[i][j-1].B += distribute; if (j+1 < columns && M2[i][j+1].B + distribute < 1) M2[i][j+1].B += distribute; } } } /*Laço para atualizar G*/ for (i = 1; i < lines - 1; i++) { #pragma omp parallel for num_threads(nr_threads) schedule(dynamic) for (j = 1; j < columns - 1; j++) { gx = M2[i][j].Rx + M2[i][j].Bx; gy = M2[i][j].Ry + M2[i][j].By; g = sqrt((gx*gx) + (gy*gy)); M2[i][j].G += g; if (M2[i][j].G > 2 * PI) M2[i][j].G -= 2 * PI; } } } /*Feito isso, checar se algum valor ultrapassou 1 *ou ficou negativo (embora provavelmente não dê para *ficar negativo)*/ /*Escreve no arquivo de saída*/ arq2 = fopen(outfile, "w"); if (arq2 == NULL) printf("Erro, não foi possível abrir o arquivo\n"); else { /*sprintf(outfile, "%s.ppm", outfile);*/ fprintf(arq2, "P3\n%d %d\n255\n", columns, lines); for (i = 0; i < lines; i++) { for (j = 0; j < columns; j++) { fprintf(arq2, "%.3f %.3f %.3f ", (float)(RGB_SIZE* M2[i][j].R), (float)((RGB_SIZE* M2[i][j].G) / (2*PI)), (float)(RGB_SIZE* M2[i][j].B)); } fprintf(arq2, "\n"); } fprintf(stdout, "A imagem foi salva no arquivo: %s\n", outfile); fclose(arq2); } for (i = 0; i < lines; i++) { free(M[i]); free(M2[i]); } free(M); free(M2); return 0; }
GB_unop__asinh_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__asinh_fc32_fc32)
// op(A') function:  GB (_unop_tran__asinh_fc32_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = casinhf (aij)

// The macros below define the kernel for this specific operator/type pair.
// They are consumed both by the apply function in this file and by the
// generic transpose kernel pulled in via #include "GB_unop_transpose.c".

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: single-precision complex inverse hyperbolic sine
#define GB_OP(z, x) \
    z = casinhf (x) ;

// casting (identity cast: A and C have the same type here)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC32_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ;            \
    Cx [pC] = casinhf (z) ;         \
}

// disable this operator and use the generic case if these conditions hold
// (GB_control.h may define GxB_NO_ASINH / GxB_NO_FC32 to shrink the library)
#define GB_DISABLE \
    (GxB_NO_ASINH || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies casinhf to each of the anz entries of Ax, writing the result into
// Cx, using nthreads OpenMP threads with a static schedule.  Ab, when
// non-NULL, is the bitmap of A: entries with Ab [p] == 0 are not present and
// are skipped.  Returns GrB_NO_VALUE when the kernel is compiled out.

GrB_Info GB (_unop_apply__asinh_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: all anz positions hold a live entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = casinhf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip positions with no entry
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = casinhf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The body is the shared transpose kernel, specialized by the macros above.

GrB_Info GB (_unop_tran__asinh_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__tanh_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__tanh_fc32_fc32
// op(A') function:  GB_unop_tran__tanh_fc32_fc32

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = ctanhf (aij)

// The macros below specialize the shared kernels (including the transpose
// kernel #include'd at the bottom) for this operator/type pair.

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: single-precision complex hyperbolic tangent
#define GB_OP(z, x) \
    z = ctanhf (x) ;

// casting (identity cast: A and C share the same type)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC32_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ;            \
    Cx [pC] = ctanhf (z) ;          \
}

// disable this operator and use the generic case if these conditions hold
// (GB_control.h may define GxB_NO_TANH / GxB_NO_FC32 to shrink the library)
#define GB_DISABLE \
    (GxB_NO_TANH || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies ctanhf elementwise to the anz entries of Ax, writing into Cx, with
// nthreads OpenMP threads.  Returns GrB_NO_VALUE when compiled out.

GrB_Info GB_unop_apply__tanh_fc32_fc32
(
    GxB_FC32_t *Cx,         // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC32_t aij = Ax [p] ;
        GxB_FC32_t z = aij ;
        Cx [p] = ctanhf (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Phase 2 of the two-phase transpose; the body is the shared kernel,
// specialized by the macros defined above.

GrB_Info GB_unop_tran__tanh_fc32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ch_common.h
#ifndef _BENCH_CHOLESKY_COMMON_ #define _BENCH_CHOLESKY_COMMON_ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include <sys/syscall.h> #include <unistd.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <errno.h> #include <assert.h> #include <mkl.h> #include <mpi.h> #include <omp.h> #ifdef TRACE #include "VT.h" #endif #if defined(USE_TIMING) void helper_start_timing(int tt); void helper_end_timing(int tt, double elapsed); #endif // #define SPEC_RESTRICT __restrict__ #define SPEC_RESTRICT restrict #if defined(CHAMELEON) || defined(CHAMELEON_TARGET) #include "chameleon.h" #ifndef my_print #define my_print(...) chameleon_print(0, "Cholesky", mype, __VA_ARGS__); #endif #else #ifndef my_print #define my_print(...) fprintf(stderr, __VA_ARGS__); #endif #endif #ifndef DPxMOD #define DPxMOD "0x%0*" PRIxPTR #endif #ifndef DPxPTR #define DPxPTR(ptr) ((int)(2*sizeof(uintptr_t))), ((uintptr_t) (ptr)) #endif #ifndef PRINT_DEBUG #define PRINT_DEBUG 0 #endif #ifdef _USE_HBW #include <hbwmalloc.h> #endif void dgemm_ (const char *transa, const char *transb, int *l, int *n, int *m, double *alpha, const void *a, int *lda, void *b, int *ldb, double *beta, void *c, int *ldc); void dtrsm_ (char *side, char *uplo, char *transa, char *diag, int *m, int *n, double *alpha, double *a, int *lda, double *b, int *ldb); void dsyrk_ (char *uplo, char *trans, int *n, int *k, double *alpha, double *a, int *lda, double *beta, double *c, int *ldc); void cholesky_single(const int ts, const int nt, double* A[nt][nt]); void cholesky_mpi(const int ts, const int nt, double *A[nt][nt], double *B, double *C[nt], int *block_rank); void omp_potrf(double * const A, int ts, int ld); void omp_trsm(double * SPEC_RESTRICT A, double * SPEC_RESTRICT B, int ts, int ld); void omp_gemm(double * SPEC_RESTRICT A, double * SPEC_RESTRICT B, double * SPEC_RESTRICT C, int ts, int ld); void omp_syrk(double * SPEC_RESTRICT A, double * SPEC_RESTRICT B, int ts, int ld); int 
get_send_flags(char *send_flags, int *block_rank, int itr1_str, int itr1_end, int itr2_str, int itr2_end, int n); void get_recv_flag(char *recv_flag, int *block_rank, int itr1_str, int itr1_end, int itr2_str, int itr2_end, int n); void wait(MPI_Request *comm_req); inline static void waitall(MPI_Request *comm_req, int n) { // #ifdef TRACE // static int event_waitall = -1; // char* event_name = "waitall"; // if(event_waitall == -1) { // int ierr; // ierr = VT_funcdef(event_name, VT_NOCLASS, &event_waitall); // } // VT_begin(event_waitall); // #endif #ifdef DISABLE_TASKYIELD MPI_Waitall(n, comm_req, MPI_STATUSES_IGNORE); #else while (1) { int flag = 0; MPI_Testall(n, comm_req, &flag, MPI_STATUSES_IGNORE); if (flag) break; (void)flag; // <-- make the Cray compiler happy #if defined(CHAMELEON) || defined(CHAMELEON_TARGET) int32_t res = chameleon_taskyield(); #else #pragma omp taskyield #endif } #endif // #ifdef TRACE // VT_end(event_waitall); // #endif } void reset_send_flags(char *send_flags); #ifdef MAIN int np; int mype; int num_threads; #else extern int np; extern int mype; extern int num_threads; #endif #endif
fill_nr_s8.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
  
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
  
       http://www.apache.org/licenses/LICENSE-2.0
  
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 *
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 */

#include <stdlib.h>
#include <math.h>
//#include <omp.h>
#include "config.h"
#include "cint.h"
#include "cvhf.h"
#include "nr_direct.h"
#include "optimizer.h"
#include "gto/gto.h"

#define MAX(I,J) ((I) > (J) ? (I) : (J))

void int2e_optimizer(CINTOpt **opt, int *atm, int natm, int *bas, int nbas,
                     double *env);

/*
 * 8-fold symmetry, k>=l, k>=i>=j,
 *
 * Computes the (kl|ji) integral blocks for one fixed shell pair (ish,jsh),
 * looping over all shell pairs (ksh,lsh) with ksh <= ish and lsh <= ksh.
 * eri holds a di*dj stack of nao x nao matrices (dims = {nao,nao,dj,di});
 * blocks rejected by fprescreen are explicitly zeroed.  Scratch space for
 * the integral engine starts right after that stack (cache).
 */
static void fillnr_s8(int (*intor)(), int (*fprescreen)(), double *eri,
                      int ish, int jsh, CVHFOpt *vhfopt, IntorEnvs *envs)
{
        const int *atm = envs->atm;
        const int *bas = envs->bas;
        const double *env = envs->env;
        const int natm = envs->natm;
        const int nbas = envs->nbas;
        const int *ao_loc = envs->ao_loc;
        const CINTOpt *cintopt = envs->cintopt;
        const int nao = ao_loc[nbas];          /* total number of AOs */
        const size_t nao2 = nao * nao;
        const int di = ao_loc[ish+1] - ao_loc[ish];   /* AO count in shell ish */
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];   /* AO count in shell jsh */
        /* integral-engine scratch lives past the di*dj stack of nao^2 blocks */
        double *cache = eri + di * dj * nao2;
        int dims[4] = {nao, nao, dj, di};
        int ksh, lsh, ij, k, l;
        int shls[4];
        double *peri;
        shls[2] = jsh;
        shls[3] = ish;
        for (ksh = 0; ksh <= ish; ksh++) {
        for (lsh = 0; lsh <= ksh; lsh++) {
                shls[0] = lsh;
                shls[1] = ksh;
                /* top-left corner of the (ksh,lsh) block in each nao^2 slab */
                peri = eri + ao_loc[ksh] * nao + ao_loc[lsh];
                if ((*fprescreen)(shls, vhfopt, atm, bas, env)) {
                        (*intor)(peri, dims, shls, atm, natm, bas, nbas, env,
                                 cintopt, cache);
                } else {
                        /* screened out: zero this block in every (i,j) slab */
                        for (ij = 0; ij < di*dj; ij++) {
                                for (k = 0; k < ao_loc[ksh+1]-ao_loc[ksh]; k++) {
                                for (l = 0; l < ao_loc[lsh+1]-ao_loc[lsh]; l++) {
                                        peri[k*nao+l] = 0;
                                } }
                                peri += nao2;
                        }
                }
        } }
}

/*
 * Fills the integrals for shell pair (ish,jsh) via fillnr_s8 into buf, then
 * packs them into the s8-compressed output eri: for each AO pair i0 >= j0
 * the row of kl-pairs with kl <= ij is copied from the corresponding nao^2
 * slab of buf into the triangular position ij0*(ij0+1)/2 of eri.
 */
static void store_ij(int (*intor)(), double *eri, double *buf,
                     int ish, int jsh, CVHFOpt *vhfopt, IntorEnvs *envs)
{
        const int nbas = envs->nbas;
        const int *ao_loc = envs->ao_loc;
        const int nao = ao_loc[nbas];
        const size_t nao2 = nao * nao;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        int i, j, k, l, i0, j0, kl;
        size_t ij0;
        double *peri, *pbuf;

        fillnr_s8(intor, vhfopt->fprescreen, buf, ish, jsh, vhfopt, envs);
        for (i0 = ao_loc[ish], i = 0; i < di; i++, i0++) {
        for (j0 = ao_loc[jsh], j = 0; j < dj; j++, j0++) {
                if (i0 >= j0) {
                        /* triangular pair index of (i0,j0) and its row start */
                        ij0 = i0*(i0+1)/2 + j0;
                        peri = eri + ij0*(ij0+1)/2;
                        pbuf = buf + nao2 * (i*dj+j);
                        /* full rows k < i0: all l <= k */
                        for (kl = 0, k = 0; k < i0; k++) {
                        for (l = 0; l <= k; l++, kl++) {
                                peri[kl] = pbuf[k*nao+l];
                        } }
// k == i0
                        /* partial last row: only l <= j0 (kl <= ij) */
                        for (l = 0; l <= j0; l++, kl++) {
                                peri[kl] = pbuf[k*nao+l];
                        }
                }
        } }
}

/*
 * Driver: computes all two-electron integrals with 8-fold symmetry into the
 * packed array eri.  Sets up a Schwarz-inequality prescreening optimizer,
 * then walks the nbas*(nbas+1)/2 shell pairs in parallel; each OpenMP
 * thread owns a private buffer of di*di nao^2 slabs plus engine cache.
 */
void GTO2e_cart_or_sph(int (*intor)(), CINTOpt *cintopt, double *eri,
                       int *ao_loc, int *atm, int natm,
                       int *bas, int nbas, double *env)
{
        const size_t nao = ao_loc[nbas];
        IntorEnvs envs = {natm, nbas, atm, bas, env, NULL, ao_loc, NULL,
                cintopt, 1};
        CVHFOpt *vhfopt;
        CVHFnr_optimizer(&vhfopt, intor, cintopt, ao_loc,
                         atm, natm, bas, nbas, env);
        vhfopt->fprescreen = CVHFnr_schwarz_cond;
        int shls_slice[] = {0, nbas};
        const int di = GTOmax_shell_dim(ao_loc, shls_slice, 1);
        const size_t cache_size = GTOmax_cache_size(intor, shls_slice, 1,
                                                    atm, natm, bas, nbas, env);

#pragma omp parallel
{
        int i, j, ij;
        /* per-thread workspace: largest-shell-pair slab stack + engine cache */
        double *buf = malloc(sizeof(double) * (di*di*nao*nao + cache_size));
#pragma omp for nowait schedule(dynamic, 2)
        for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
                /* invert ij = i*(i+1)/2 + j (1e-7 guards float rounding) */
                i = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
                j = ij - (i*(i+1)/2);
                store_ij(intor, eri, buf, i, j, vhfopt, &envs);
        }
        free(buf);
}
        CVHFdel_optimizer(&vhfopt);
}
GB_unaryop.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop // op(A') function: GB_tran // C type: GB_ctype // A type: GB_atype // cast: GB_CAST(cij,aij) // unaryop: GB_UNARYOP(cij,aij) #define GB_ATYPE \ GB_atype #define GB_CTYPE \ GB_ctype // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GB_geta(aij,Ax,pA) #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ GB_UNARYOP(z, x) ; // casting #define GB_CASTING(z, x) \ GB_CAST(z, x) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ GB_disable //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop ( GB_ctype *restrict Cx, const GB_atype *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
CLHelper.h
//------------------------------------------ //--cambine:helper function for OpenCL //--programmer: Jianbin Fang //--date: 27/12/2010 //------------------------------------------ #ifndef _CL_HELPER_ #define _CL_HELPER_ #include <CL/cl.h> #include <vector> #include <iostream> #include <fstream> #include <string> using std::string; using std::ifstream; using std::cerr; using std::endl; using std::cout; //#pragma OPENCL EXTENSION cl_nv_compiler_options:enable #define WORK_DIM 2 //work-items dimensions struct oclHandleStruct { cl_context context; cl_device_id *devices; cl_command_queue queue; cl_program program; cl_int cl_status; std::string error_str; std::vector<cl_kernel> kernel; }; struct oclHandleStruct oclHandles; char kernel_file[100] = "Kernels.cl"; int total_kernels = 2; string kernel_names[2] = {"BFS_1", "BFS_2"}; int work_group_size = 512; int device_id_inused = 0; //deviced id used (default : 0) /* * Converts the contents of a file into a string */ string FileToString(const string fileName) { ifstream f(fileName.c_str(), ifstream::in | ifstream::binary); try { size_t size; char* str; string s; if(f.is_open()) { size_t fileSize; f.seekg(0, ifstream::end); size = fileSize = f.tellg(); f.seekg(0, ifstream::beg); str = new char[size+1]; if (!str) throw(string("Could not allocate memory")); f.read(str, fileSize); f.close(); str[size] = '\0'; s = str; delete [] str; return s; } } catch(std::string msg) { cerr << "Exception caught in FileToString(): " << msg << endl; if(f.is_open()) f.close(); } catch(...) 
{ cerr << "Exception caught in FileToString()" << endl; if(f.is_open()) f.close(); } string errorMsg = "FileToString()::Error: Unable to open file " + fileName; throw(errorMsg); } //--------------------------------------- //Read command line parameters // void _clCmdParams(int argc, char* argv[]){ for (int i =0; i < argc; ++i) { switch (argv[i][1]) { case 'g': //--g stands for size of work group if (++i < argc) { sscanf(argv[i], "%u", &work_group_size); } else { std::cerr << "Could not read argument after option " << argv[i-1] << std::endl; throw; } break; case 'd': //--d stands for device id used in computaion if (++i < argc) { sscanf(argv[i], "%u", &device_id_inused); } else { std::cerr << "Could not read argument after option " << argv[i-1] << std::endl; throw; } break; default: ; } } } //--------------------------------------- //Initlize CL objects //--description: there are 5 steps to initialize all the OpenCL objects needed //--revised on 04/01/2011: get the number of devices and // devices have no relationship with context void _clInit() { int DEVICE_ID_INUSED = device_id_inused; cl_int resultCL; oclHandles.context = NULL; oclHandles.devices = NULL; oclHandles.queue = NULL; oclHandles.program = NULL; cl_uint deviceListSize; //----------------------------------------------- //--cambine-1: find the available platforms and select one cl_uint numPlatforms; cl_platform_id targetPlatform = NULL; resultCL = clGetPlatformIDs(0, NULL, &numPlatforms); if (resultCL != CL_SUCCESS) throw (string("InitCL()::Error: Getting number of platforms (clGetPlatformIDs)")); //printf("number of platforms:%d\n",numPlatforms); //by cambine if (!(numPlatforms > 0)) throw (string("InitCL()::Error: No platforms found (clGetPlatformIDs)")); cl_platform_id* allPlatforms = (cl_platform_id*) malloc(numPlatforms * sizeof(cl_platform_id)); resultCL = clGetPlatformIDs(numPlatforms, allPlatforms, NULL); if (resultCL != CL_SUCCESS) throw (string("InitCL()::Error: Getting platform ids 
(clGetPlatformIDs)")); /* Select the target platform. Default: first platform */ targetPlatform = allPlatforms[0]; for (int i = 0; i < numPlatforms; i++) { char pbuff[128]; resultCL = clGetPlatformInfo( allPlatforms[i], CL_PLATFORM_VENDOR, sizeof(pbuff), pbuff, NULL); if (resultCL != CL_SUCCESS) throw (string("InitCL()::Error: Getting platform info (clGetPlatformInfo)")); //printf("vedor is %s\n",pbuff); } free(allPlatforms); //----------------------------------------------- //--cambine-2: create an OpenCL context cl_context_properties cprops[3] = { CL_CONTEXT_PLATFORM, (cl_context_properties)targetPlatform, 0 }; oclHandles.context = clCreateContextFromType(cprops, CL_DEVICE_TYPE_GPU, NULL, NULL, &resultCL); if ((resultCL != CL_SUCCESS) || (oclHandles.context == NULL)) throw (string("InitCL()::Error: Creating Context (clCreateContextFromType)")); //----------------------------------------------- //--cambine-3: detect OpenCL devices /* First, get the size of device list */ oclHandles.cl_status = clGetDeviceIDs(targetPlatform, CL_DEVICE_TYPE_GPU, 0, NULL, &deviceListSize); if(oclHandles.cl_status!=CL_SUCCESS){ throw(string("exception in _clInit -> clGetDeviceIDs")); } if (deviceListSize == 0) throw(string("InitCL()::Error: No devices found.")); //std::cout<<"device number:"<<deviceListSize<<std::endl; /* Now, allocate the device list */ oclHandles.devices = (cl_device_id *)malloc(deviceListSize * sizeof(cl_device_id)); if (oclHandles.devices == 0) throw(string("InitCL()::Error: Could not allocate memory.")); /* Next, get the device list data */ oclHandles.cl_status = clGetDeviceIDs(targetPlatform, CL_DEVICE_TYPE_GPU, deviceListSize, \ oclHandles.devices, NULL); if(oclHandles.cl_status!=CL_SUCCESS){ throw(string("exception in _clInit -> clGetDeviceIDs-2")); } //----------------------------------------------- //--cambine-4: Create an OpenCL command queue oclHandles.queue = clCreateCommandQueue(oclHandles.context, oclHandles.devices[DEVICE_ID_INUSED], 0, &resultCL); if 
((resultCL != CL_SUCCESS) || (oclHandles.queue == NULL)) throw(string("InitCL()::Creating Command Queue. (clCreateCommandQueue)")); //----------------------------------------------- //--cambine-5: Load CL file, build CL program object, create CL kernel object std::string source_str = FileToString(kernel_file); const char * source = source_str.c_str(); size_t sourceSize[] = { source_str.length() }; oclHandles.program = clCreateProgramWithSource(oclHandles.context, 1, &source, sourceSize, &resultCL); if ((resultCL != CL_SUCCESS) || (oclHandles.program == NULL)) throw(string("InitCL()::Error: Loading Binary into cl_program. (clCreateProgramWithBinary)")); //insert debug information //std::string options= "-cl-nv-verbose"; //Doesn't work on AMD machines //options += " -cl-nv-opt-level=3"; resultCL = clBuildProgram(oclHandles.program, deviceListSize, oclHandles.devices, NULL, NULL,NULL); if ((resultCL != CL_SUCCESS) || (oclHandles.program == NULL)) { cerr << "InitCL()::Error: In clBuildProgram" << endl; size_t length; resultCL = clGetProgramBuildInfo(oclHandles.program, oclHandles.devices[DEVICE_ID_INUSED], CL_PROGRAM_BUILD_LOG, 0, NULL, &length); if(resultCL != CL_SUCCESS) throw(string("InitCL()::Error: Getting Program build info(clGetProgramBuildInfo)")); char* buffer = (char*)malloc(length); resultCL = clGetProgramBuildInfo(oclHandles.program, oclHandles.devices[DEVICE_ID_INUSED], CL_PROGRAM_BUILD_LOG, length, buffer, NULL); if(resultCL != CL_SUCCESS) throw(string("InitCL()::Error: Getting Program build info(clGetProgramBuildInfo)")); cerr << buffer << endl; free(buffer); throw(string("InitCL()::Error: Building Program (clBuildProgram)")); } //get program information in intermediate representation #ifdef PTX_MSG size_t binary_sizes[deviceListSize]; char * binaries[deviceListSize]; //figure out number of devices and the sizes of the binary for each device. 
oclHandles.cl_status = clGetProgramInfo(oclHandles.program, CL_PROGRAM_BINARY_SIZES, sizeof(size_t)*deviceListSize, &binary_sizes, NULL ); if(oclHandles.cl_status!=CL_SUCCESS){ throw(string("--cambine:exception in _InitCL -> clGetProgramInfo-2")); } std::cout<<"--cambine:"<<binary_sizes<<std::endl; //copy over all of the generated binaries. for(int i=0;i<deviceListSize;i++) binaries[i] = (char *)malloc( sizeof(char)*(binary_sizes[i]+1)); oclHandles.cl_status = clGetProgramInfo(oclHandles.program, CL_PROGRAM_BINARIES, sizeof(char *)*deviceListSize, binaries, NULL ); if(oclHandles.cl_status!=CL_SUCCESS){ throw(string("--cambine:exception in _InitCL -> clGetProgramInfo-3")); } for(int i=0;i<deviceListSize;i++) binaries[i][binary_sizes[i]] = '\0'; std::cout<<"--cambine:writing ptd information..."<<std::endl; FILE * ptx_file = fopen("cl.ptx","w"); if(ptx_file==NULL){ throw(string("exceptions in allocate ptx file.")); } fprintf(ptx_file,"%s",binaries[DEVICE_ID_INUSED]); fclose(ptx_file); std::cout<<"--cambine:writing ptd information done."<<std::endl; for(int i=0;i<deviceListSize;i++) free(binaries[i]); #endif for (int nKernel = 0; nKernel < total_kernels; nKernel++) { /* get a kernel object handle for a kernel with the given name */ cl_kernel kernel = clCreateKernel(oclHandles.program, (kernel_names[nKernel]).c_str(), &resultCL); if ((resultCL != CL_SUCCESS) || (kernel == NULL)) { string errorMsg = "InitCL()::Error: Creating Kernel (clCreateKernel) \"" + kernel_names[nKernel] + "\""; throw(errorMsg); } oclHandles.kernel.push_back(kernel); } //get resource alocation information #ifdef RES_MSG char * build_log; size_t ret_val_size; oclHandles.cl_status = clGetProgramBuildInfo(oclHandles.program, oclHandles.devices[DEVICE_ID_INUSED], CL_PROGRAM_BUILD_LOG, 0, NULL, &ret_val_size); if(oclHandles.cl_status!=CL_SUCCESS){ throw(string("exceptions in _InitCL -> getting resource information")); } build_log = (char *)malloc(ret_val_size+1); oclHandles.cl_status = 
clGetProgramBuildInfo(oclHandles.program, oclHandles.devices[DEVICE_ID_INUSED], CL_PROGRAM_BUILD_LOG, ret_val_size, build_log, NULL); if(oclHandles.cl_status!=CL_SUCCESS){ throw(string("exceptions in _InitCL -> getting resources allocation information-2")); } build_log[ret_val_size] = '\0'; std::cout<<"--cambine:"<<build_log<<std::endl; free(build_log); #endif } //--------------------------------------- //release CL objects void _clRelease() { char errorFlag = false; for (int nKernel = 0; nKernel < oclHandles.kernel.size(); nKernel++) { if (oclHandles.kernel[nKernel] != NULL) { cl_int resultCL = clReleaseKernel(oclHandles.kernel[nKernel]); if (resultCL != CL_SUCCESS) { cerr << "ReleaseCL()::Error: In clReleaseKernel" << endl; errorFlag = true; } oclHandles.kernel[nKernel] = NULL; } oclHandles.kernel.clear(); } if (oclHandles.program != NULL) { cl_int resultCL = clReleaseProgram(oclHandles.program); if (resultCL != CL_SUCCESS) { cerr << "ReleaseCL()::Error: In clReleaseProgram" << endl; errorFlag = true; } oclHandles.program = NULL; } if (oclHandles.queue != NULL) { cl_int resultCL = clReleaseCommandQueue(oclHandles.queue); if (resultCL != CL_SUCCESS) { cerr << "ReleaseCL()::Error: In clReleaseCommandQueue" << endl; errorFlag = true; } oclHandles.queue = NULL; } free(oclHandles.devices); if (oclHandles.context != NULL) { cl_int resultCL = clReleaseContext(oclHandles.context); if (resultCL != CL_SUCCESS) { cerr << "ReleaseCL()::Error: In clReleaseContext" << endl; errorFlag = true; } oclHandles.context = NULL; } if (errorFlag) throw(string("ReleaseCL()::Error encountered.")); } //-------------------------------------------------------- //--cambine:create buffer and then copy data from host to device cl_mem _clCreateAndCpyMem(int size, void * h_mem_source) throw(string){ cl_mem d_mem; d_mem = clCreateBuffer(oclHandles.context, CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR, \ size, h_mem_source, &oclHandles.cl_status); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) 
throw(string("excpetion in _clCreateAndCpyMem()")); #endif return d_mem; } //------------------------------------------------------- //--cambine: create read only buffer for devices //--date: 17/01/2011 cl_mem _clMallocRW(int size, void * h_mem_ptr) throw(string){ cl_mem d_mem; d_mem = clCreateBuffer(oclHandles.context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR, size, h_mem_ptr, &oclHandles.cl_status); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) throw(string("excpetion in _clMallocRW")); #endif return d_mem; } //------------------------------------------------------- //--cambine: create read and write buffer for devices //--date: 17/01/2011 cl_mem _clMalloc(int size, void * h_mem_ptr) throw(string){ cl_mem d_mem; d_mem = clCreateBuffer(oclHandles.context, CL_MEM_WRITE_ONLY | CL_MEM_COPY_HOST_PTR, size, h_mem_ptr, &oclHandles.cl_status); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) throw(string("excpetion in _clMalloc")); #endif return d_mem; } //------------------------------------------------------- //--cambine: transfer data from host to device //--date: 17/01/2011 void _clMemcpyH2D(cl_mem d_mem, int size, const void *h_mem_ptr) throw(string){ oclHandles.cl_status = clEnqueueWriteBuffer(oclHandles.queue, d_mem, CL_TRUE, 0, size, h_mem_ptr, 0, NULL, NULL); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) throw(string("excpetion in _clMemcpyH2D")); #endif } //-------------------------------------------------------- //--cambine:create buffer and then copy data from host to device with pinned // memory cl_mem _clCreateAndCpyPinnedMem(int size, float* h_mem_source) throw(string){ cl_mem d_mem, d_mem_pinned; float * h_mem_pinned = NULL; d_mem_pinned = clCreateBuffer(oclHandles.context, CL_MEM_READ_ONLY|CL_MEM_ALLOC_HOST_PTR, \ size, NULL, &oclHandles.cl_status); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) throw(string("excpetion in _clCreateAndCpyMem()->d_mem_pinned")); #endif //------------ d_mem = clCreateBuffer(oclHandles.context, 
/* NOTE(review): this chunk opens mid-function. The statements below are the
   tail of _clCreateAndCpyMem(), whose declaration and opening brace lie
   before this view; only comments/formatting have been added here. */
                                CL_MEM_READ_ONLY,
                                size, NULL, &oclHandles.cl_status);
#ifdef ERRMSG
    if(oclHandles.cl_status != CL_SUCCESS)
        throw(string("excpetion in _clCreateAndCpyMem() -> d_mem "));
#endif
    //----------
    /* Map the pinned host buffer for writing so the source data can be staged
       through page-locked memory before the device transfer. */
    h_mem_pinned = (cl_float *)clEnqueueMapBuffer(oclHandles.queue, d_mem_pinned, CL_TRUE,
                                                  CL_MAP_WRITE, 0, size, 0, NULL,
                                                  NULL, &oclHandles.cl_status);
#ifdef ERRMSG
    if(oclHandles.cl_status != CL_SUCCESS)
        throw(string("excpetion in _clCreateAndCpyMem() -> clEnqueueMapBuffer"));
#endif
    /* Copy host source data into the pinned buffer in parallel.
       Assumes `size` is a whole number of floats -- TODO confirm at call sites. */
    int element_number = size/sizeof(float);
#pragma omp parallel for
    for(int i=0;i<element_number;i++){
        h_mem_pinned[i] = h_mem_source[i];
    }
    //----------
    /* Blocking (CL_TRUE) write from the pinned staging buffer to the device buffer. */
    oclHandles.cl_status = clEnqueueWriteBuffer(oclHandles.queue, d_mem,
                                                CL_TRUE, 0, size, h_mem_pinned,
                                                0, NULL, NULL);
#ifdef ERRMSG
    if(oclHandles.cl_status != CL_SUCCESS)
        throw(string("excpetion in _clCreateAndCpyMem() -> clEnqueueWriteBuffer"));
#endif
    return d_mem;
}

//--------------------------------------------------------
//--cambine:create write only buffer on device
/* Allocate a write-only device buffer of `size` bytes.
   Throws a string describing the failure when ERRMSG is defined. */
cl_mem _clMallocWO(int size) throw(string){
    cl_mem d_mem;
    d_mem = clCreateBuffer(oclHandles.context, CL_MEM_WRITE_ONLY, size, 0, &oclHandles.cl_status);
#ifdef ERRMSG
    if(oclHandles.cl_status != CL_SUCCESS)
        throw(string("excpetion in _clCreateMem()"));
#endif
    return d_mem;
}

//--------------------------------------------------------
//transfer data from device to host
/* Blocking read of `size` bytes from device buffer `d_mem` into host memory
   `h_mem`. When ERRMSG is defined, maps the OpenCL status code to a readable
   message and throws it. */
void _clMemcpyD2H(cl_mem d_mem, int size, void * h_mem) throw(string){
    oclHandles.cl_status = clEnqueueReadBuffer(oclHandles.queue, d_mem, CL_TRUE, 0, size, h_mem, 0,0,0);
#ifdef ERRMSG
    oclHandles.error_str = "excpetion in _clCpyMemD2H -> ";
    switch(oclHandles.cl_status){
        case CL_INVALID_COMMAND_QUEUE:
            oclHandles.error_str += "CL_INVALID_COMMAND_QUEUE";
            break;
        case CL_INVALID_CONTEXT:
            oclHandles.error_str += "CL_INVALID_CONTEXT";
            break;
        case CL_INVALID_MEM_OBJECT:
            oclHandles.error_str += "CL_INVALID_MEM_OBJECT";
            break;
        case CL_INVALID_VALUE:
            oclHandles.error_str += "CL_INVALID_VALUE";
            break;
        case CL_INVALID_EVENT_WAIT_LIST:
            oclHandles.error_str += "CL_INVALID_EVENT_WAIT_LIST";
            break;
        case CL_MEM_OBJECT_ALLOCATION_FAILURE:
            oclHandles.error_str += "CL_MEM_OBJECT_ALLOCATION_FAILURE";
            break;
        case CL_OUT_OF_HOST_MEMORY:
            oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY";
            break;
        default:
            oclHandles.error_str += "Unknown reason";
            break;
    }
    if(oclHandles.cl_status != CL_SUCCESS)
        throw(oclHandles.error_str);
#endif
}

//--------------------------------------------------------
//set kernel arguments
/* Bind argument `arg_idx` of kernel `kernel_id`.
   With size==0 (the default), `d_mem` is treated as a cl_mem handle and is
   passed by address (sizeof(void*)); otherwise `d_mem` points at `size`
   bytes of plain argument data passed directly to clSetKernelArg. */
void _clSetArgs(int kernel_id, int arg_idx, void * d_mem, int size = 0) throw(string){
    if(!size){
        oclHandles.cl_status = clSetKernelArg(oclHandles.kernel[kernel_id], arg_idx, sizeof(d_mem), &d_mem);
#ifdef ERRMSG
        oclHandles.error_str = "excpetion in _clSetKernelArg() ";
        switch(oclHandles.cl_status){
            case CL_INVALID_KERNEL:
                oclHandles.error_str += "CL_INVALID_KERNEL";
                break;
            case CL_INVALID_ARG_INDEX:
                oclHandles.error_str += "CL_INVALID_ARG_INDEX";
                break;
            case CL_INVALID_ARG_VALUE:
                oclHandles.error_str += "CL_INVALID_ARG_VALUE";
                break;
            case CL_INVALID_MEM_OBJECT:
                oclHandles.error_str += "CL_INVALID_MEM_OBJECT";
                break;
            case CL_INVALID_SAMPLER:
                oclHandles.error_str += "CL_INVALID_SAMPLER";
                break;
            case CL_INVALID_ARG_SIZE:
                oclHandles.error_str += "CL_INVALID_ARG_SIZE";
                break;
            case CL_OUT_OF_RESOURCES:
                oclHandles.error_str += "CL_OUT_OF_RESOURCES";
                break;
            case CL_OUT_OF_HOST_MEMORY:
                oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY";
                break;
            default:
                oclHandles.error_str += "Unknown reason";
                break;
        }
        if(oclHandles.cl_status != CL_SUCCESS)
            throw(oclHandles.error_str);
#endif
    }
    else{
        oclHandles.cl_status = clSetKernelArg(oclHandles.kernel[kernel_id], arg_idx, size, d_mem);
#ifdef ERRMSG
        oclHandles.error_str = "excpetion in _clSetKernelArg() ";
        switch(oclHandles.cl_status){
            case CL_INVALID_KERNEL:
                oclHandles.error_str += "CL_INVALID_KERNEL";
                break;
            case CL_INVALID_ARG_INDEX:
                oclHandles.error_str += "CL_INVALID_ARG_INDEX";
                break;
            case CL_INVALID_ARG_VALUE:
                oclHandles.error_str += "CL_INVALID_ARG_VALUE";
                break;
            case CL_INVALID_MEM_OBJECT:
                oclHandles.error_str += "CL_INVALID_MEM_OBJECT";
                break;
            case CL_INVALID_SAMPLER:
                oclHandles.error_str += "CL_INVALID_SAMPLER";
                break;
            case CL_INVALID_ARG_SIZE:
                oclHandles.error_str += "CL_INVALID_ARG_SIZE";
                break;
            case CL_OUT_OF_RESOURCES:
                oclHandles.error_str += "CL_OUT_OF_RESOURCES";
                break;
            case CL_OUT_OF_HOST_MEMORY:
                oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY";
                break;
            default:
                oclHandles.error_str += "Unknown reason";
                break;
        }
        if(oclHandles.cl_status != CL_SUCCESS)
            throw(oclHandles.error_str);
#endif
    }
}

/* Block until all previously queued commands on the shared queue complete. */
void _clFinish() throw(string){
    oclHandles.cl_status = clFinish(oclHandles.queue);
#ifdef ERRMSG
    oclHandles.error_str = "excpetion in _clFinish";
    switch(oclHandles.cl_status){
        case CL_INVALID_COMMAND_QUEUE:
            oclHandles.error_str += "CL_INVALID_COMMAND_QUEUE";
            break;
        case CL_OUT_OF_RESOURCES:
            oclHandles.error_str += "CL_OUT_OF_RESOURCES";
            break;
        case CL_OUT_OF_HOST_MEMORY:
            oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY";
            break;
        default:
            oclHandles.error_str += "Unknown reasons";
            break;
    }
    if(oclHandles.cl_status!=CL_SUCCESS){
        throw(oclHandles.error_str);
    }
#endif
}

//--------------------------------------------------------
//--cambine:enqueue kernel
/* Launch kernel `kernel_id` over `work_items` items with groups of
   `work_group_size`; the global size is rounded up to a multiple of the
   group size (kernels are expected to bounds-check).
   NOTE(review): the cl_event written into e[0] is never released or waited
   on -- presumably a per-launch event leak; confirm and consider passing
   NULL or calling clReleaseEvent. */
void _clInvokeKernel(int kernel_id, int work_items, int work_group_size) throw(string){
    cl_uint work_dim = WORK_DIM;
    cl_event e[1];

    if(work_items%work_group_size != 0)	//process situations that work_items cannot be divided by work_group_size
        work_items = work_items + (work_group_size-(work_items%work_group_size));
    size_t local_work_size[] = {work_group_size, 1};
    size_t global_work_size[] = {work_items, 1};
    oclHandles.cl_status = clEnqueueNDRangeKernel(oclHandles.queue, oclHandles.kernel[kernel_id], work_dim, 0,
                                                  global_work_size, local_work_size, 0 , 0, &(e[0]) );
#ifdef ERRMSG
    oclHandles.error_str = "excpetion in _clInvokeKernel() -> ";
    switch(oclHandles.cl_status)
    {
        case CL_INVALID_PROGRAM_EXECUTABLE:
            oclHandles.error_str += "CL_INVALID_PROGRAM_EXECUTABLE";
            break;
        case CL_INVALID_COMMAND_QUEUE:
            oclHandles.error_str += "CL_INVALID_COMMAND_QUEUE";
            break;
        case CL_INVALID_KERNEL:
            oclHandles.error_str += "CL_INVALID_KERNEL";
            break;
        case CL_INVALID_CONTEXT:
            oclHandles.error_str += "CL_INVALID_CONTEXT";
            break;
        case CL_INVALID_KERNEL_ARGS:
            oclHandles.error_str += "CL_INVALID_KERNEL_ARGS";
            break;
        case CL_INVALID_WORK_DIMENSION:
            oclHandles.error_str += "CL_INVALID_WORK_DIMENSION";
            break;
        case CL_INVALID_GLOBAL_WORK_SIZE:
            oclHandles.error_str += "CL_INVALID_GLOBAL_WORK_SIZE";
            break;
        case CL_INVALID_WORK_GROUP_SIZE:
            oclHandles.error_str += "CL_INVALID_WORK_GROUP_SIZE";
            break;
        case CL_INVALID_WORK_ITEM_SIZE:
            oclHandles.error_str += "CL_INVALID_WORK_ITEM_SIZE";
            break;
        case CL_INVALID_GLOBAL_OFFSET:
            oclHandles.error_str += "CL_INVALID_GLOBAL_OFFSET";
            break;
        case CL_OUT_OF_RESOURCES:
            oclHandles.error_str += "CL_OUT_OF_RESOURCES";
            break;
        case CL_MEM_OBJECT_ALLOCATION_FAILURE:
            oclHandles.error_str += "CL_MEM_OBJECT_ALLOCATION_FAILURE";
            break;
        case CL_INVALID_EVENT_WAIT_LIST:
            oclHandles.error_str += "CL_INVALID_EVENT_WAIT_LIST";
            break;
        case CL_OUT_OF_HOST_MEMORY:
            oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY";
            break;
        default:
            oclHandles.error_str += "Unkown reseason";
            break;
    }
    if(oclHandles.cl_status != CL_SUCCESS)
        throw(oclHandles.error_str);
#endif
    //_clFinish();
    //	oclHandles.cl_status = clWaitForEvents(1, &e[0]);
    //	#ifdef ERRMSG
    //	if (oclHandles.cl_status!= CL_SUCCESS)
    //		throw(string("excpetion in _clEnqueueNDRange() -> clWaitForEvents"));
    //	#endif
}

/* 2-D variant of the kernel launch: global range (range_x, range_y) with
   work-groups of (group_x, group_y). Unlike _clInvokeKernel, the global
   size is NOT rounded up here (see the commented-out code below).
   NOTE(review): same unreleased cl_event as in _clInvokeKernel. */
void _clInvokeKernel2D(int kernel_id, int range_x, int range_y, int group_x, int group_y) throw(string){
    cl_uint work_dim = WORK_DIM;
    size_t local_work_size[] = {group_x, group_y};
    size_t global_work_size[] = {range_x, range_y};
    cl_event e[1];
    /*if(work_items%work_group_size != 0)	//process situations that work_items cannot be divided by work_group_size
      work_items = work_items + (work_group_size-(work_items%work_group_size));*/
    oclHandles.cl_status = clEnqueueNDRangeKernel(oclHandles.queue, oclHandles.kernel[kernel_id], work_dim, 0,
                                                  global_work_size, local_work_size, 0 , 0, &(e[0]) );
#ifdef ERRMSG
    oclHandles.error_str = "excpetion in _clInvokeKernel() -> ";
    switch(oclHandles.cl_status)
    {
        case CL_INVALID_PROGRAM_EXECUTABLE:
            oclHandles.error_str += "CL_INVALID_PROGRAM_EXECUTABLE";
            break;
        case CL_INVALID_COMMAND_QUEUE:
            oclHandles.error_str += "CL_INVALID_COMMAND_QUEUE";
            break;
        case CL_INVALID_KERNEL:
            oclHandles.error_str += "CL_INVALID_KERNEL";
            break;
        case CL_INVALID_CONTEXT:
            oclHandles.error_str += "CL_INVALID_CONTEXT";
            break;
        case CL_INVALID_KERNEL_ARGS:
            oclHandles.error_str += "CL_INVALID_KERNEL_ARGS";
            break;
        case CL_INVALID_WORK_DIMENSION:
            oclHandles.error_str += "CL_INVALID_WORK_DIMENSION";
            break;
        case CL_INVALID_GLOBAL_WORK_SIZE:
            oclHandles.error_str += "CL_INVALID_GLOBAL_WORK_SIZE";
            break;
        case CL_INVALID_WORK_GROUP_SIZE:
            oclHandles.error_str += "CL_INVALID_WORK_GROUP_SIZE";
            break;
        case CL_INVALID_WORK_ITEM_SIZE:
            oclHandles.error_str += "CL_INVALID_WORK_ITEM_SIZE";
            break;
        case CL_INVALID_GLOBAL_OFFSET:
            oclHandles.error_str += "CL_INVALID_GLOBAL_OFFSET";
            break;
        case CL_OUT_OF_RESOURCES:
            oclHandles.error_str += "CL_OUT_OF_RESOURCES";
            break;
        case CL_MEM_OBJECT_ALLOCATION_FAILURE:
            oclHandles.error_str += "CL_MEM_OBJECT_ALLOCATION_FAILURE";
            break;
        case CL_INVALID_EVENT_WAIT_LIST:
            oclHandles.error_str += "CL_INVALID_EVENT_WAIT_LIST";
            break;
        case CL_OUT_OF_HOST_MEMORY:
            oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY";
            break;
        default:
            oclHandles.error_str += "Unkown reseason";
            break;
    }
    if(oclHandles.cl_status != CL_SUCCESS)
        throw(oclHandles.error_str);
#endif
    //_clFinish();
    /*oclHandles.cl_status = clWaitForEvents(1, &e[0]);
    #ifdef ERRMSG
    if (oclHandles.cl_status!= CL_SUCCESS)
        throw(string("excpetion in _clEnqueueNDRange() -> clWaitForEvents"));
    #endif*/
}

//--------------------------------------------------------
//release OpenCL objects
/* Release a device buffer if non-NULL.
   NOTE(review): when ob==NULL the ERRMSG block still inspects the previous
   (stale) cl_status -- presumably the check should be nested under the
   if(ob!=NULL); confirm before changing. */
void _clFree(cl_mem ob) throw(string){
    if(ob!=NULL)
        oclHandles.cl_status = clReleaseMemObject(ob);
#ifdef ERRMSG
    oclHandles.error_str = "excpetion in _clFree() ->";
    switch(oclHandles.cl_status)
    {
        case CL_INVALID_MEM_OBJECT:
            oclHandles.error_str += "CL_INVALID_MEM_OBJECT";
            break;
        case CL_OUT_OF_RESOURCES:
            oclHandles.error_str += "CL_OUT_OF_RESOURCES";
            break;
        case CL_OUT_OF_HOST_MEMORY:
            oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY";
            break;
        default:
            oclHandles.error_str += "Unkown reseason";
            break;
    }
    if (oclHandles.cl_status!= CL_SUCCESS)
        throw(oclHandles.error_str);
#endif
}
#endif //_CL_HELPER_
/* ======================= file: sparse_convert_mex.c ======================= */
#include <mex.h>
#include <libutils/debug_defs.h>
#include <libutils/mtypes.h>
#include <libutils/parallel.h>
#include <libutils/message_id.h>
#include <libutils/sorted_list.h>

#include "mexio.h"
#include "sparse_opts.h"
#include "sparse_utils.h"

/*
  MEX gateway: converts a MATLAB sparse matrix into the library's internal
  per-thread distributed sparse format and returns it as a MATLAB structure.

  pargin[0]  square, real-valued sparse matrix (required)
  pargin[1]  conversion options structure (optional, see mex2opts)
  pargin[2]  1-based per-thread row distribution, size nthreads+1 (optional)

  pargout[0] structure produced by sparse2mex() with the converted matrix.

  Errors are reported through USERERROR (which does not return); inside the
  OpenMP parallel region errors are instead flagged via `errors`/`buff` and
  raised after the region ends, since USERERROR inside a parallel region
  segfaults MATLAB (see comment at lerrors below).
*/
void mexFunction(int nargout, mxArray *pargout [ ], int nargin, const mxArray *pargin[])
{
  char buff[256];
  Uint errors = 0;
  t_opts opts;
  const mxArray *spA;
  mwSize *Ai;    /* NOTE(review): mxGetIr() returns mwIndex*; mwSize here looks inconsistent -- confirm both types are the same width in this build */
  mwIndex *Ap;
  Double *Ax;
  dimType dim;
  indexType nnz;
  struct sparse_matrix_t sp = {0};
  model_data mdata = {0};

  if (nargin < 1) MEXHELP;

  /* ---- input validation: sparse, square, real-valued ---- */
  if(!mxIsSparse(pargin[0])){
    USERERROR("Parameter must be a sparse matrix.", MUTILS_INVALID_PARAMETER);
  }
  spA = pargin[0];
  if(mxGetM(spA) != mxGetN(spA)){
    USERERROR("Sparse matrix must be square.", MUTILS_INVALID_PARAMETER);
  }
  if(mxIsLogical(spA)){
    USERERROR("Sparse matrix must be real-valued.", MUTILS_INVALID_PARAMETER);
  }

  /* raw CSC arrays of the MATLAB matrix */
  Ap = mxGetJc(spA);
  Ai = mxGetIr(spA);
  Ax = mxGetPr(spA);

  /* check if the dimensions and number of non-zeros fit internal types */
  SNPRINTF(buff, 255, "Sparse matrix dimension can be at most %"PRI_DIMTYPE, MaxDimType);
  managed_type_cast(dimType, dim, mxGetM(spA), buff);
  SNPRINTF(buff, 255, "Number of non-zero entries in the parse matrix can be at most %"PRI_INDEXTYPE, MaxIndexType);
  managed_type_cast(indexType, nnz, Ap[dim], buff);

  /* parse matrix conversion options */
  if(nargin>=2){
    opts = mex2opts(pargin[1]);
  } else {
    opts = mex2opts(NULL);
  }

  /* read row distribution of the matrix among the cpus */
  if(nargin>=3){
    size_t m, n;
    dimType *temp;
    m = 1; n = opts.nthreads+1;
    sp.row_cpu_dist = mex_get_matrix(dimType, pargin[2], &m, &n, "rowdist", "1", "nthreads+1", 1);
    if(sp.row_cpu_dist){
      /* copy into library-owned memory, converting 1-based MATLAB row
         indices to 0-based internal indices */
      mmalloc_global(temp, sizeof(dimType)*(opts.nthreads+1));
      for(m=0; m<opts.nthreads+1; m++) {
        temp[m] = sp.row_cpu_dist[m]-1;
      }
      sp.row_cpu_dist = temp;
    }
    TODO("Check that the row_cpu_dist is block-size consistent");
  }

  /* data validation */
  TODO("check that the matrix structure fits the given block size");

  mdata.block_size = opts.block_size;
  mdata.interleaved = opts.interleave;
  mdata.deflate = opts.remove_zero_cols;
  mdata.nthreads = opts.nthreads;

  /* fill the sparse structure */
  sp.matrix_dim = dim;
  sp.matrix_nz = nnz;
  sp.block_size = 1;
  sp.symmetric = opts.symmetric;
  sp.interleaved = 0;
  sp.cols_removed= opts.remove_zero_cols;
  sp.mincol = 0;
  sp.maxcol = 0;

  /* creating a 'localized' matrix distribution */
  /* i.e. thread_Ai start with 0th column on all threads. */
  if(sp.symmetric || mdata.deflate){
    sp.localized = 1;
    mcalloc_global(sp.comm_pattern, sizeof(dimType*)*(mdata.nthreads*mdata.nthreads));
    mcalloc_global(sp.n_comm_entries, sizeof(dimType)*(mdata.nthreads*mdata.nthreads));
    mcalloc_global(mdata.local_offset, sizeof(dimType)*mdata.nthreads);
    mcalloc_global(mdata.mincol, sizeof(dimType)*mdata.nthreads);
    mcalloc_global(mdata.maxcol, sizeof(dimType)*mdata.nthreads);
  }

  /* per-thread output arrays (interleaved layout is not implemented) */
  if(opts.interleave){
    USERERROR("not implemented yet", MUTILS_INVALID_PARAMETER);
    mcalloc(mdata.thread_Aix, sizeof(char*)*mdata.nthreads);
  } else {
    mcalloc(mdata.thread_Ai, sizeof(dimType*)*mdata.nthreads);
    mcalloc(mdata.thread_Ax, sizeof(Double*)*mdata.nthreads);
  }
  mcalloc(mdata.thread_Ap, sizeof(indexType*)*mdata.nthreads);

  /*************************************************************/
  /*
    Find matrix distribution if not already given, e.g. from METIS.
    For that we only need Ap, since the distribution is done based on
    the number of non-zeros in the matrix alone.
    Assign contiguous row ranges to individual threads.
    If the matrix is blocked, assign rows in a block-aligned manner
    (based on Aiblock/Apblock)

    On exit, matrices have the follwing array fields:

    row_cpu_dist  rows assigned to individual threads
                  (Ap-like structure, size nthreads+1)
    nz_cpu_dist   number of non-zero entries per thread (size nthreads)
  */
  /*************************************************************/
  tic();
  sparse_find_distribution_matlab(mdata.nthreads, &sp, Ap, mdata.block_size);
  ntoc("Row distribution");

  /* Here we know the data distribution, so the rest can be done in parallel. */

  /* use target number of threads */
  parallel_set_num_threads(mdata.nthreads);

#ifdef USE_OPENMP
#pragma omp parallel
#endif
  {
    Uint thrid, nthr;
    dimType row_l;
    struct sparse_matrix_t sp_thr = sp;   /* thread-private copy of the descriptor */

    parallel_get_info(&thrid, &nthr);
    if(opts.cpu_affinity) affinity_bind(thrid, opts.cpu_start + thrid);

    if(nthr!=mdata.nthreads){
      errors = 1;
#ifdef USE_OPENMP
#pragma omp single
#endif
      SNPRINTF(buff, 255, "Could not start the requested number of %"PRI_UINT"threads.", mdata.nthreads);
      goto lerrors;
    }

#ifdef USE_OPENMP
#pragma omp single
#endif
    VERBOSE("Using %"PRI_UINT" threads", DEBUG_BASIC, nthr);

    row_l = sp_thr.row_cpu_dist[thrid];   /* first row owned by this thread */

    /* Work done here:

       we do need to find communication
       we need to block, or not
       we need to interleave, or not
       we need to deflate (remove empty columns), or not
       we do need to localize the thread matrices
    */

    /* 1st pass through Ai is read-only
       sparse_find_communication can also prepare lists
       for matrix localization. Only needed for symmetric matrices,
       and when empty columns are removed.
    */
    if(sp_thr.localized){
      tic();
      sparse_analyze_communication_matlab(thrid, nthr, &sp_thr, Ap, Ai);

      /* verify that a symmetric matrix only has upper-triangular part */
      if(sp_thr.symmetric && sp_thr.mincol<row_l) errors = 1;
#ifdef USE_OPENMP
#pragma omp barrier
#pragma omp flush(errors)
#endif
      if(errors){
#ifdef USE_OPENMP
#pragma omp single
#endif
        SNPRINTF(buff, 255, "A symmetric sparse matrix should only contain the lower-triangular part.");
        goto lerrors;
      }

#ifdef USE_OPENMP
#pragma omp barrier
#pragma omp single
#endif
      sp.comm_pattern_ext = copy_comm_pattern(&sp_thr, &mdata);
#ifdef USE_OPENMP
#pragma omp barrier
#pragma omp flush(sp)
#endif
      sp_thr.comm_pattern_ext = sp.comm_pattern_ext;
      ntoc("Analyze communication");
#ifdef USE_OPENMP
#pragma omp barrier
#endif
    }

    /* 2nd pass through Ai
       read-write, Ax read-write, Ap read-write
       read Ai entries, convert Matlab to native types,
       block if needed, localize, deflate if needed, interleave if needed.
       if not interleaved already, distribute Ax.
    */
    sparse_distribute_matlab(thrid, nthr, &sp_thr, Ap, Ai, Ax,
                             mdata.block_size, mdata.deflate,
                             mdata.thread_Ap+thrid, mdata.thread_Ai+thrid, mdata.thread_Ax+thrid);

    if(sp.localized){
      mdata.local_offset[thrid] = sp_thr.local_offset;
      mdata.maxcol[thrid] = sp_thr.maxcol;
      mdata.mincol[thrid] = sp_thr.mincol;
    }

    /* Cleanly end the OpenMP parallel region. */
    /* Using USERERROR from within parallel regions */
    /* causes segfault in MATLAB */
  lerrors:
    if(errors){
      errors = 1;
    }
  }

  /* free all allocated memory and print an error message */
  if(errors){
    if(sp.localized){
      free_comm_pattern(sp.comm_pattern, sp.n_comm_entries, &mdata);
      free_comm_pattern(sp.comm_pattern_ext, sp.n_comm_entries, &mdata);
      mfree_global(sp.n_comm_entries, sizeof(dimType)*(mdata.nthreads*mdata.nthreads));
      mfree_global(mdata.local_offset, sizeof(dimType)*mdata.nthreads);
      mfree_global(mdata.mincol, sizeof(dimType)*mdata.nthreads);
      mfree_global(mdata.maxcol, sizeof(dimType)*mdata.nthreads);
    }

    /* was the row distribution supplied by the user? */
    mfree_global(sp.row_cpu_dist, sizeof(dimType)*(mdata.nthreads+1));
    mfree_global(sp.nz_cpu_dist, sizeof(indexType)*mdata.nthreads);

    /* TODO free 'global' memory allocations */
    if(opts.interleave){
      mfree(mdata.thread_Aix, sizeof(char*)*mdata.nthreads);
    } else {
      mfree(mdata.thread_Ai, sizeof(dimType*)*mdata.nthreads);
      mfree(mdata.thread_Ax, sizeof(Double*)*mdata.nthreads);
    }
    mfree(mdata.thread_Ap, sizeof(indexType*)*mdata.nthreads);

    DEBUG_STATISTICS;
    USERERROR("%s", MUTILS_INVALID_PARAMETER, buff);
  }

  /*************************************************************/
  /* output: structure containing converted and distributed sparse matrix. */
  tic();
  pargout[0] = sparse2mex(sp, mdata, opts);
  ntoc("Data export to MATLAB");
  /*************************************************************/

  /* Free the internally used memory structures
     that are not exported to MATLAB. */
  if(opts.interleave){
    mfree(mdata.thread_Aix, sizeof(char*)*mdata.nthreads);
  } else {
    mfree(mdata.thread_Ai, sizeof(dimType*)*mdata.nthreads);
    mfree(mdata.thread_Ax, sizeof(Double*)*mdata.nthreads);
  }
  mfree(mdata.thread_Ap, sizeof(indexType*)*mdata.nthreads);

  DEBUG_STATISTICS;
}
/* ============================ file: pngquant.c ============================ */
/* pngquant.c - quantize the colors in an alphamap down to a specified number ** ** © 2009-2019 by Kornel Lesiński. ** © 1989, 1991 by Jef Poskanzer. ** © 1997-2002 by Greg Roelofs; based on an idea by Stefan Schneider. ** ** See COPYRIGHT file for license. */ char *PNGQUANT_USAGE = "\ usage: pngquant [options] [ncolors] -- pngfile [pngfile ...]\n\ pngquant [options] [ncolors] - >stdout <stdin\n\n\ options:\n\ --force overwrite existing output files (synonym: -f)\n\ --skip-if-larger only save converted files if they're smaller than original\n\ --output file destination file path to use instead of --ext (synonym: -o)\n\ --ext new.png set custom suffix/extension for output filenames\n\ --quality min-max don't save below min, use fewer colors below max (0-100)\n\ --speed N speed/quality trade-off. 1=slow, 4=default, 11=fast & rough\n\ --nofs disable Floyd-Steinberg dithering\n\ --posterize N output lower-precision color (e.g. for ARGB4444 output)\n\ --strip remove optional metadata (default on Mac)\n\ --verbose print status messages (synonym: -v)\n\ \n\ Quantizes one or more 32-bit RGBA PNGs to 8-bit (or smaller) RGBA-palette.\n\ The output filename is the same as the input name except that\n\ it ends in \"-fs8.png\", \"-or8.png\" or your custom extension (unless the\n\ input is stdin, in which case the quantized image will go to stdout).\n\ If you pass the special output path \"-\" and a single input file, that file\n\ will be processed and the quantized image will go to stdout.\n\ The default behavior if the output file exists is to skip the conversion;\n\ use --force to overwrite. 
See man page for full list of options.\n"; #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <stdbool.h> #include <math.h> #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) # include <fcntl.h> /* O_BINARY */ # include <io.h> /* setmode() */ #else # include <unistd.h> #endif #ifdef _OPENMP #include <omp.h> #else #define omp_get_max_threads() 1 #define omp_get_thread_num() 0 #endif #include "rwpng.h" /* typedefs, common macros, public prototypes */ #include "libimagequant.h" /* if it fails here, run: git submodule update; ./configure; or add -Ilib to compiler flags */ #include "pngquant_opts.h" char *PNGQUANT_VERSION = LIQ_VERSION_STRING " (February 2021)"; static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, rwpng_color_transform tag, png8_image *output_image); static void set_palette(liq_result *result, png8_image *output_image); static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool strip, bool verbose); static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options, liq_attr *liq); static char *add_filename_extension(const char *filename, const char *newext); static bool file_exists(const char *outname); static void verbose_printf(liq_attr *liq, struct pngquant_options *context, const char *fmt, ...) 
{ if (context->log_callback) { va_list va; va_start(va, fmt); int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0 va_end(va); #if defined(_MSC_VER) char *buf = malloc(required_space); #else char buf[required_space]; #endif va_start(va, fmt); vsnprintf(buf, required_space, fmt, va); va_end(va); context->log_callback(liq, buf, context->log_callback_user_info); #if defined(_MSC_VER) free(buf); #endif } } static void log_callback(const liq_attr *attr, const char *msg, void* user_info) { fprintf(stderr, "%s\n", msg); } #ifdef _OPENMP #define LOG_BUFFER_SIZE 1300 struct buffered_log { int buf_used; char buf[LOG_BUFFER_SIZE]; }; static void log_callback_buferred_flush(const liq_attr *attr, void *context) { struct buffered_log *log = context; if (log->buf_used) { fwrite(log->buf, 1, log->buf_used, stderr); fflush(stderr); log->buf_used = 0; } } static void log_callback_buferred(const liq_attr *attr, const char *msg, void* context) { struct buffered_log *log = context; int len = strlen(msg); if (len > LOG_BUFFER_SIZE-2) len = LOG_BUFFER_SIZE-2; if (len > LOG_BUFFER_SIZE - log->buf_used - 2) log_callback_buferred_flush(attr, log); memcpy(&log->buf[log->buf_used], msg, len); log->buf_used += len+1; log->buf[log->buf_used-1] = '\n'; log->buf[log->buf_used] = '\0'; } #endif void pngquant_internal_print_config(FILE *fd) { fputs("" #ifndef NDEBUG " WARNING: this is a DEBUG (slow) version.\n" /* NDEBUG disables assert() */ #endif #if !USE_SSE && (defined(__SSE__) || defined(__amd64__) || defined(__X86_64__) || defined(__i386__)) " SSE acceleration disabled.\n" #endif #if _OPENMP " Compiled with OpenMP (multicore support).\n" #endif , fd); fflush(fd); } FILE *pngquant_c_stderr() { return stderr; } FILE *pngquant_c_stdout() { return stdout; } static void print_full_version(FILE *fd) { fprintf(fd, "pngquant, %s, by Kornel Lesinski, Greg Roelofs.\n", PNGQUANT_VERSION); pngquant_internal_print_config(fd); rwpng_version_info(fd); fputs("\n", fd); } static void print_usage(FILE 
*fd) { fputs(PNGQUANT_USAGE, fd); } /** * N = automatic quality, uses limit unless force is set (N-N or 0-N) * -N = no better than N (same as 0-N) * N-M = no worse than N, no better than M * N- = no worse than N, perfect if possible (same as N-100) * * where N,M are numbers between 0 (lousy) and 100 (perfect) */ static bool parse_quality(const char *quality, liq_attr *options, bool *min_quality_limit) { long limit, target; const char *str = quality; char *end; long t1 = strtol(str, &end, 10); if (str == end) return false; str = end; if ('\0' == end[0] && t1 < 0) { // quality="-%d" target = -t1; limit = 0; } else if ('\0' == end[0]) { // quality="%d" target = t1; limit = t1*9/10; } else if ('-' == end[0] && '\0' == end[1]) { // quality="%d-" target = 100; limit = t1; } else { // quality="%d-%d" long t2 = strtol(str, &end, 10); if (str == end || t2 > 0) return false; target = -t2; limit = t1; } *min_quality_limit = (limit > 0); return LIQ_OK == liq_set_quality(options, limit, target); } pngquant_error pngquant_main_internal(struct pngquant_options *options, liq_attr *liq); static pngquant_error pngquant_file_internal(const char *filename, const char *outname, struct pngquant_options *options, liq_attr *liq); #ifndef PNGQUANT_NO_MAIN int main(int argc, char *argv[]) { struct pngquant_options options = { .floyd = 1.f, // floyd-steinberg dithering .strip = false, }; pngquant_error retval = pngquant_parse_options(argc, argv, &options); if (retval != SUCCESS) { return retval; } if (options.print_version) { puts(PNGQUANT_VERSION); return SUCCESS; } if (options.missing_arguments) { print_full_version(stderr); print_usage(stderr); return MISSING_ARGUMENT; } if (options.print_help) { print_full_version(stdout); print_usage(stdout); return SUCCESS; } liq_attr *liq = liq_attr_create(); if (!liq) { fputs("SSE-capable CPU is required for this build.\n", stderr); return WRONG_ARCHITECTURE; } if (options.quality && !parse_quality(options.quality, liq, &options.min_quality_limit)) { 
fputs("Quality should be in format min-max where min and max are numbers in range 0-100.\n", stderr); return INVALID_ARGUMENT; } if (options.iebug) { // opacities above 238 will be rounded up to 255, because IE6 truncates <255 to 0. liq_set_min_opacity(liq, 238); fputs(" warning: the workaround for IE6 is deprecated\n", stderr); } if (options.verbose) { liq_set_log_callback(liq, log_callback, NULL); options.log_callback = log_callback; } if (options.last_index_transparent) { liq_set_last_index_transparent(liq, true); } if (options.speed >= 10) { options.fast_compression = true; if (options.speed == 11) { options.floyd = 0; options.speed = 10; } } if (options.speed && LIQ_OK != liq_set_speed(liq, options.speed)) { fputs("Speed should be between 1 (slow) and 11 (fast).\n", stderr); return INVALID_ARGUMENT; } if (options.colors && LIQ_OK != liq_set_max_colors(liq, options.colors)) { fputs("Number of colors must be between 2 and 256.\n", stderr); return INVALID_ARGUMENT; } if (options.posterize && LIQ_OK != liq_set_min_posterization(liq, options.posterize)) { fputs("Posterization should be number of bits in range 0-4.\n", stderr); return INVALID_ARGUMENT; } if (options.extension && options.output_file_path) { fputs("--ext and --output options can't be used at the same time\n", stderr); return INVALID_ARGUMENT; } // new filename extension depends on options used. Typically basename-fs8.png if (options.extension == NULL) { options.extension = options.floyd > 0 ? "-fs8.png" : "-or8.png"; } if (options.output_file_path && options.num_files != 1) { fputs(" error: Only one input file is allowed when --output is used. This error also happens when filenames with spaces are not in quotes.\n", stderr); return INVALID_ARGUMENT; } if (options.using_stdout && !options.using_stdin && options.num_files != 1) { fputs(" error: Only one input file is allowed when using the special output path \"-\" to write to stdout. 
This error also happens when filenames with spaces are not in quotes.\n", stderr); return INVALID_ARGUMENT; } if (!options.num_files && !options.using_stdin) { fputs("No input files specified.\n", stderr); if (options.verbose) { print_full_version(stderr); } print_usage(stderr); return MISSING_ARGUMENT; } retval = pngquant_main_internal(&options, liq); liq_attr_destroy(liq); return retval; } #endif // Don't use this. This is not a public API. pngquant_error pngquant_main_internal(struct pngquant_options *options, liq_attr *liq) { if (options->map_file) { png24_image tmp = {.width=0}; if (SUCCESS != read_image(liq, options->map_file, false, &tmp, &options->fixed_palette_image, true, true, false)) { fprintf(stderr, " error: unable to load %s", options->map_file); return INVALID_ARGUMENT; } liq_result *tmp_quantize = liq_quantize_image(liq, options->fixed_palette_image); const liq_palette *pal = liq_get_palette(tmp_quantize); if (!pal) { fprintf(stderr, " error: unable to read colors from %s", options->map_file); return INVALID_ARGUMENT; } for(unsigned int i=0; i < pal->count; i++) { liq_image_add_fixed_color(options->fixed_palette_image, pal->entries[i]); } liq_result_destroy(tmp_quantize); } #ifdef _OPENMP // if there's a lot of files, coarse parallelism can be used if (options->num_files > 2*omp_get_max_threads()) { omp_set_nested(0); omp_set_dynamic(1); } else { omp_set_nested(1); } #endif unsigned int error_count=0, skipped_count=0, file_count=0; pngquant_error latest_error=SUCCESS; #pragma omp parallel for \ schedule(static, 1) reduction(+:skipped_count) reduction(+:error_count) reduction(+:file_count) shared(latest_error) for(int i=0; i < options->num_files; i++) { const char *filename = options->using_stdin ? 
"stdin" : options->files[i]; struct pngquant_options opts = *options; liq_attr *local_liq = liq_attr_copy(liq); #ifdef _OPENMP struct buffered_log buf = {0}; if (opts.log_callback && omp_get_num_threads() > 1 && opts.num_files > 1) { liq_set_log_callback(local_liq, log_callback_buferred, &buf); liq_set_log_flush_callback(local_liq, log_callback_buferred_flush, &buf); opts.log_callback = log_callback_buferred; opts.log_callback_user_info = &buf; } #endif pngquant_error retval = SUCCESS; const char *outname = opts.output_file_path; char *outname_free = NULL; if (!opts.using_stdout) { if (!outname) { outname = outname_free = add_filename_extension(filename, opts.extension); } if (!opts.force && file_exists(outname)) { fprintf(stderr, " error: '%s' exists; not overwriting\n", outname); retval = NOT_OVERWRITING_ERROR; } } if (SUCCESS == retval) { retval = pngquant_file_internal(filename, outname, &opts, local_liq); } free(outname_free); liq_attr_destroy(local_liq); if (retval) { #pragma omp critical { latest_error = retval; } if (retval == TOO_LOW_QUALITY || retval == TOO_LARGE_FILE) { skipped_count++; } else { error_count++; } } ++file_count; } if (error_count) { verbose_printf(liq, options, "There were errors quantizing %d file%s out of a total of %d file%s.", error_count, (error_count == 1)? "" : "s", file_count, (file_count == 1)? "" : "s"); } if (skipped_count) { verbose_printf(liq, options, "Skipped %d file%s out of a total of %d file%s.", skipped_count, (skipped_count == 1)? "" : "s", file_count, (file_count == 1)? "" : "s"); } if (!skipped_count && !error_count) { verbose_printf(liq, options, "Quantized %d image%s.", file_count, (file_count == 1)? "" : "s"); } if (options->fixed_palette_image) liq_image_destroy(options->fixed_palette_image); return latest_error; } /// Don't hack this. 
Instead use https://github.com/ImageOptim/libimagequant/blob/f54d2f1a3e1cf728e17326f4db0d45811c63f063/example.c static pngquant_error pngquant_file_internal(const char *filename, const char *outname, struct pngquant_options *options, liq_attr *liq) { pngquant_error retval = SUCCESS; verbose_printf(liq, options, "%s:", filename); liq_image *input_image = NULL; png24_image input_image_rwpng = {.width=0}; bool keep_input_pixels = options->skip_if_larger || (options->using_stdout && options->min_quality_limit); // original may need to be output to stdout if (SUCCESS == retval) { retval = read_image(liq, filename, options->using_stdin, &input_image_rwpng, &input_image, keep_input_pixels, options->strip, options->verbose); } int quality_percent = 90; // quality on 0-100 scale, updated upon successful remap png8_image output_image = {.width=0}; if (SUCCESS == retval) { verbose_printf(liq, options, " read %luKB file", (input_image_rwpng.file_size+1023UL)/1024UL); if (RWPNG_ICCP == input_image_rwpng.input_color) { verbose_printf(liq, options, " used embedded ICC profile to transform image to sRGB colorspace"); } else if (RWPNG_GAMA_CHRM == input_image_rwpng.input_color) { verbose_printf(liq, options, " used gAMA and cHRM chunks to transform image to sRGB colorspace"); } else if (RWPNG_ICCP_WARN_GRAY == input_image_rwpng.input_color) { verbose_printf(liq, options, " warning: ignored ICC profile in GRAY colorspace"); } else if (RWPNG_COCOA == input_image_rwpng.input_color) { // No comment } else if (RWPNG_SRGB == input_image_rwpng.input_color) { verbose_printf(liq, options, " passing sRGB tag from the input"); } else if (input_image_rwpng.gamma != 0.45455) { verbose_printf(liq, options, " converted image from gamma %2.1f to gamma 2.2", 1.0/input_image_rwpng.gamma); } // when using image as source of a fixed palette the palette is extracted using regular quantization liq_result *remap; liq_error remap_error = liq_image_quantize(options->fixed_palette_image ? 
options->fixed_palette_image : input_image, liq, &remap); if (LIQ_OK == remap_error) { // fixed gamma ~2.2 for the web. PNG can't store exact 1/2.2 // NB: can't change gamma here, because output_color is allowed to be an sRGB tag liq_set_output_gamma(remap, 0.45455); liq_set_dithering_level(remap, options->floyd); retval = prepare_output_image(remap, input_image, input_image_rwpng.output_color, &output_image); if (SUCCESS == retval) { if (LIQ_OK != liq_write_remapped_image_rows(remap, input_image, output_image.row_pointers)) { retval = OUT_OF_MEMORY_ERROR; } set_palette(remap, &output_image); double palette_error = liq_get_quantization_error(remap); if (palette_error >= 0) { quality_percent = liq_get_quantization_quality(remap); verbose_printf(liq, options, " mapped image to new colors...MSE=%.3f (Q=%d)", palette_error, quality_percent); } } liq_result_destroy(remap); } else if (LIQ_QUALITY_TOO_LOW == remap_error) { retval = TOO_LOW_QUALITY; } else { retval = INVALID_ARGUMENT; // dunno } } if (SUCCESS == retval) { if (options->skip_if_larger) { // this is very rough approximation, but generally avoid losing more quality than is gained in file size. // Quality is raised to 1.5, because even greater savings are needed to justify big quality loss. // but >50% savings are considered always worthwhile in order to allow low quality conversions to work at all const double quality = quality_percent/100.0; const double expected_reduced_size = pow(quality, 1.5); output_image.maximum_file_size = (input_image_rwpng.file_size-1) * (expected_reduced_size < 0.5 ? 
0.5 : expected_reduced_size); } output_image.fast_compression = options->fast_compression; output_image.chunks = input_image_rwpng.chunks; input_image_rwpng.chunks = NULL; retval = write_image(&output_image, NULL, outname, options, liq); if (TOO_LARGE_FILE == retval) { verbose_printf(liq, options, " file exceeded expected size of %luKB", (unsigned long)output_image.maximum_file_size/1024UL); } if (SUCCESS == retval && output_image.metadata_size > 0) { verbose_printf(liq, options, " copied %dKB of additional PNG metadata", (int)(output_image.metadata_size+999)/1000); } } if (options->using_stdout && keep_input_pixels && (TOO_LARGE_FILE == retval || TOO_LOW_QUALITY == retval)) { // when outputting to stdout it'd be nasty to create 0-byte file // so if quality is too low, output 24-bit original pngquant_error write_retval = write_image(NULL, &input_image_rwpng, outname, options, liq); if (write_retval) { retval = write_retval; } } if (input_image) liq_image_destroy(input_image); rwpng_free_image24(&input_image_rwpng); rwpng_free_image8(&output_image); return retval; } static void set_palette(liq_result *result, png8_image *output_image) { const liq_palette *palette = liq_get_palette(result); output_image->num_palette = palette->count; for(unsigned int i=0; i < palette->count; i++) { const liq_color px = palette->entries[i]; output_image->palette[i] = (rwpng_rgba){.r=px.r, .g=px.g, .b=px.b, .a=px.a}; } } static bool file_exists(const char *outname) { FILE *outfile = fopen(outname, "rb"); if ((outfile ) != NULL) { fclose(outfile); return true; } return false; } /* build the output filename from the input name by inserting "-fs8" or * "-or8" before the ".png" extension (or by appending that plus ".png" if * there isn't any extension), then make sure it doesn't exist already */ static char *add_filename_extension(const char *filename, const char *newext) { size_t x = strlen(filename); char* outname = malloc(x+4+strlen(newext)+1); if (!outname) return NULL; strcpy(outname, 
filename);
    // Replace a trailing ".png"/".PNG" with the new suffix; otherwise append it.
    // The caller allocated x+4+strlen(newext)+1 bytes, enough for either case.
    if (x > 4 && (strncmp(outname+x-4, ".png", 4) == 0 || strncmp(outname+x-4, ".PNG", 4) == 0)) {
        strcpy(outname+x-4, newext);
    } else {
        strcpy(outname+x, newext);
    }
    return outname;
}

/* Build "<basename>.tmp" in freshly-malloc'd memory; NULL on allocation failure.
   Caller frees. */
static char *temp_filename(const char *basename)
{
    size_t x = strlen(basename);
    char *outname = malloc(x+1+4); /* basename + ".tmp" + NUL */
    if (!outname) return NULL;

    strcpy(outname, basename);
    strcpy(outname+x, ".tmp");

    return outname;
}

/* On Windows, switch the standard stream to binary mode so PNG bytes are not
   mangled by CRLF translation. fd 1 = stdout, fd 0 = stdin; no-op elsewhere. */
static void set_binary_mode(FILE *fp)
{
#if defined(_WIN32) || defined(WIN32) || defined(__WIN32__)
    setmode(fp == stdout ? 1 : 0, O_BINARY);
#endif
}

/* Return the basename portion of a '/'-separated path (pointer into `path`). */
static const char *filename_part(const char *path)
{
    const char *outfilename = strrchr(path, '/');
    if (outfilename) {
        return outfilename+1;
    } else {
        return path;
    }
}

/* Atomically move `from` over `to` via rename(); returns true on success.
   Windows rename() refuses to overwrite, so unlink the target first when forced. */
static bool replace_file(const char *from, const char *to, const bool force)
{
#if defined(_WIN32) || defined(WIN32) || defined(__WIN32__)
    if (force) {
        // On Windows rename doesn't replace
        unlink(to);
    }
#endif
    return (0 == rename(from, to));
}

/* Write either the quantized 8-bit image (output_image) or the truecolor
   fallback (output_image24) to stdout or to a temp file later renamed over
   `outname`. Exactly one of the two image pointers is used (8-bit wins). */
static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options, liq_attr *liq)
{
    FILE *outfile;
    char *tempname = NULL;

    if (options->using_stdout) {
        set_binary_mode(stdout);
        outfile = stdout;

        if (output_image) {
            verbose_printf(liq, options, "  writing %d-color image to stdout", output_image->num_palette);
        } else {
            verbose_printf(liq, options, "  writing truecolor image to stdout");
        }
    } else {
        // Write to a temporary file first so a failed write never corrupts
        // an existing destination file (the rename happens after success).
        tempname = temp_filename(outname);
        if (!tempname) return OUT_OF_MEMORY_ERROR;

        if ((outfile = fopen(tempname, "wb")) == NULL) {
            fprintf(stderr, "  error: cannot open '%s' for writing\n", tempname);
            free(tempname);
            return CANT_WRITE_ERROR;
        }

        if (output_image) {
            verbose_printf(liq, options, "  writing %d-color image as %s", output_image->num_palette, filename_part(outname));
        } else {
            verbose_printf(liq, options, "  writing truecolor image as %s", filename_part(outname));
        }
    }

    pngquant_error retval;
    // libpng is not thread-safe across concurrent encode/decode calls
    #pragma omp critical (libpng)
    {
        if (output_image) {
            retval = 
rwpng_write_image8(outfile, output_image); } else { retval = rwpng_write_image24(outfile, output_image24); } } if (!options->using_stdout) { fclose(outfile); if (SUCCESS == retval) { // Image has been written to a temporary file and then moved over destination. // This makes replacement atomic and avoids damaging destination file on write error. if (!replace_file(tempname, outname, options->force)) { retval = CANT_WRITE_ERROR; } } if (retval) { unlink(tempname); } } free(tempname); if (retval && retval != TOO_LARGE_FILE) { fprintf(stderr, " error: failed writing image to %s (%d)\n", options->using_stdout ? "stdout" : outname, retval); } return retval; } static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool strip, bool verbose) { FILE *infile; if (using_stdin) { set_binary_mode(stdin); infile = stdin; } else if ((infile = fopen(filename, "rb")) == NULL) { fprintf(stderr, " error: cannot open %s for reading\n", filename); return READ_ERROR; } pngquant_error retval; #pragma omp critical (libpng) { retval = rwpng_read_image24(infile, input_image_p, strip, verbose); } if (!using_stdin) { fclose(infile); } if (retval) { fprintf(stderr, " error: cannot decode image %s\n", using_stdin ? 
"from stdin" : filename_part(filename)); return retval; } *liq_image_p = liq_image_create_rgba_rows(options, (void**)input_image_p->row_pointers, input_image_p->width, input_image_p->height, input_image_p->gamma); if (!*liq_image_p) { return OUT_OF_MEMORY_ERROR; } if (!keep_input_pixels) { if (LIQ_OK != liq_image_set_memory_ownership(*liq_image_p, LIQ_OWN_ROWS | LIQ_OWN_PIXELS)) { return OUT_OF_MEMORY_ERROR; } input_image_p->row_pointers = NULL; input_image_p->rgba_data = NULL; } return SUCCESS; } static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, rwpng_color_transform output_color, png8_image *output_image) { output_image->width = liq_image_get_width(input_image); output_image->height = liq_image_get_height(input_image); output_image->gamma = liq_get_output_gamma(result); output_image->output_color = output_color; /* ** Step 3.7 [GRR]: allocate memory for the entire indexed image */ output_image->indexed_data = malloc((size_t)output_image->height * (size_t)output_image->width); output_image->row_pointers = malloc((size_t)output_image->height * sizeof(output_image->row_pointers[0])); if (!output_image->indexed_data || !output_image->row_pointers) { return OUT_OF_MEMORY_ERROR; } for(size_t row = 0; row < output_image->height; row++) { output_image->row_pointers[row] = output_image->indexed_data + row * output_image->width; } const liq_palette *palette = liq_get_palette(result); // tRNS, etc. output_image->num_palette = palette->count; return SUCCESS; }
displacement_lagrangemultiplier_frictional_contact_criteria.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_FRICTIONAL_CONTACT_CRITERIA_H) #define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_FRICTIONAL_CONTACT_CRITERIA_H /* System includes */ /* External includes */ /* Project includes */ #include "utilities/table_stream_utility.h" #include "utilities/color_utilities.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" #include "custom_utilities/active_set_utilities.h" #include "utilities/constraint_utilities.h" #include "custom_utilities/contact_utilities.h" namespace Kratos { ///@addtogroup ContactStructuralMechanicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@name Kratos Classes ///@{ /** * @class DisplacementLagrangeMultiplierFrictionalContactCriteria * @ingroup ContactStructuralMechanicsApplication * @brief Convergence criteria for contact problems * @details This class implements a convergence control based on nodal displacement and * lagrange multiplier values. The error is evaluated separately for each of them, and * relative and absolute tolerances for both must be specified. 
* @author Vicente Mataix Ferrandiz */ template< class TSparseSpace, class TDenseSpace > class DisplacementLagrangeMultiplierFrictionalContactCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ /// Pointer definition of DisplacementLagrangeMultiplierFrictionalContactCriteria KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierFrictionalContactCriteria ); /// Local Flags KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT ); KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT ); KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED ); KRATOS_DEFINE_LOCAL_FLAG( PURE_SLIP ); /// The base class definition (and it subclasses) typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; /// The sparse space used typedef TSparseSpace SparseSpaceType; /// The r_table stream definition TODO: Replace by logger typedef TableStreamUtility::Pointer TablePrinterPointerType; /// The index type definition typedef std::size_t IndexType; /// The key type definition typedef std::size_t KeyType; /// The epsilon tolerance definition static constexpr double Tolerance = std::numeric_limits<double>::epsilon(); ///@} ///@name Life Cycle ///@{ /// Constructor. 
/** * @param DispRatioTolerance Relative tolerance for displacement error * @param DispAbsTolerance Absolute tolerance for displacement error * @param LMRatioTolerance Relative tolerance for lagrange multiplier error * @param LMAbsTolerance Absolute tolerance for lagrange multiplier error * @param NormalTangentRatio Ratio between the normal and tangent that will accepted as converged * @param EnsureContact To check if the contact is lost * @param pTable The pointer to the output r_table * @param PrintingOutput If the output is going to be printed in a txt file */ explicit DisplacementLagrangeMultiplierFrictionalContactCriteria( const TDataType DispRatioTolerance, const TDataType DispAbsTolerance, const TDataType LMNormalRatioTolerance, const TDataType LMNormalAbsTolerance, const TDataType LMTangentStickRatioTolerance, const TDataType LMTangentStickAbsTolerance, const TDataType LMTangentSlipRatioTolerance, const TDataType LMTangentSlipAbsTolerance, const TDataType NormalTangentRatio, const bool EnsureContact = false, const bool PureSlip = false, const bool PrintingOutput = false ) : BaseType() { // Set local flags mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT, EnsureContact); mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT, PrintingOutput); mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP, PureSlip); mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED, false); // The displacement solution mDispRatioTolerance = DispRatioTolerance; mDispAbsTolerance = DispAbsTolerance; // The normal contact solution mLMNormalRatioTolerance = LMNormalRatioTolerance; mLMNormalAbsTolerance = LMNormalAbsTolerance; // The tangent contact solution mLMTangentStickRatioTolerance = LMTangentStickRatioTolerance; mLMTangentStickAbsTolerance = LMTangentStickAbsTolerance; mLMTangentStickRatioTolerance = LMTangentSlipRatioTolerance; 
mLMTangentStickAbsTolerance = LMTangentSlipAbsTolerance; // We get the ratio between the normal and tangent that will accepted as converged mNormalTangentRatio = NormalTangentRatio; } /** * @brief Default constructor (parameters) * @param ThisParameters The configuration parameters */ explicit DisplacementLagrangeMultiplierFrictionalContactCriteria( Parameters ThisParameters = Parameters(R"({})")) : BaseType() { // The default parameters Parameters default_parameters = Parameters(R"( { "ensure_contact" : false, "pure_slip" : false, "print_convergence_criterion" : false, "displacement_relative_tolerance" : 1.0e-4, "displacement_absolute_tolerance" : 1.0e-9, "contact_displacement_relative_tolerance" : 1.0e-4, "contact_displacement_absolute_tolerance" : 1.0e-9, "frictional_stick_contact_displacement_relative_tolerance" : 1.0e-4, "frictional_stick_contact_displacement_absolute_tolerance" : 1.0e-9, "frictional_slip_contact_displacement_relative_tolerance" : 1.0e-4, "frictional_slip_contact_displacement_absolute_tolerance" : 1.0e-9, "ratio_normal_tangent_threshold" : 1.0e-4 })" ); ThisParameters.ValidateAndAssignDefaults(default_parameters); // The displacement solution mDispRatioTolerance = ThisParameters["displacement_relative_tolerance"].GetDouble(); mDispAbsTolerance = ThisParameters["displacement_absolute_tolerance"].GetDouble(); // The normal contact solution mLMNormalRatioTolerance = ThisParameters["contact_displacement_relative_tolerance"].GetDouble(); mLMNormalAbsTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble(); // The tangent contact solution mLMTangentStickRatioTolerance = ThisParameters["frictional_stick_contact_displacement_relative_tolerance"].GetDouble(); mLMTangentStickAbsTolerance = ThisParameters["frictional_stick_contact_displacement_absolute_tolerance"].GetDouble(); mLMTangentSlipRatioTolerance = ThisParameters["frictional_slip_contact_displacement_relative_tolerance"].GetDouble(); mLMTangentSlipAbsTolerance = 
ThisParameters["frictional_slip_contact_displacement_absolute_tolerance"].GetDouble(); // We get the ratio between the normal and tangent that will accepted as converged mNormalTangentRatio = ThisParameters["ratio_normal_tangent_threshold"].GetDouble(); // Set local flags mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED, false); mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP, ThisParameters["pure_slip"].GetBool()); } //* Copy constructor. DisplacementLagrangeMultiplierFrictionalContactCriteria( DisplacementLagrangeMultiplierFrictionalContactCriteria const& rOther ) :BaseType(rOther) ,mOptions(rOther.mOptions) ,mDispRatioTolerance(rOther.mDispRatioTolerance) ,mDispAbsTolerance(rOther.mDispAbsTolerance) ,mLMNormalRatioTolerance(rOther.mLMNormalRatioTolerance) ,mLMNormalAbsTolerance(rOther.mLMNormalAbsTolerance) ,mLMTangentStickRatioTolerance(rOther.mLMTangentStickRatioTolerance) ,mLMTangentStickAbsTolerance(rOther.mLMTangentStickAbsTolerance) ,mLMTangentSlipRatioTolerance(rOther.mLMTangentSlipRatioTolerance) ,mLMTangentSlipAbsTolerance(rOther.mLMTangentSlipAbsTolerance) ,mNormalTangentRatio(rOther.mNormalTangentRatio) { } /// Destructor. ~DisplacementLagrangeMultiplierFrictionalContactCriteria() override = default; ///@} ///@name Operators ///@{ /** * @brief Compute relative and absolute error. * @param rModelPart Reference to the ModelPart containing the contact problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { if (SparseSpaceType::Size(rDx) != 0) { //if we are solving for something // Getting process info ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // Initialize TDataType disp_solution_norm = 0.0, normal_lm_solution_norm = 0.0, tangent_lm_stick_solution_norm = 0.0, tangent_lm_slip_solution_norm = 0.0, disp_increase_norm = 0.0, normal_lm_increase_norm = 0.0, tangent_lm_stick_increase_norm = 0.0, tangent_lm_slip_increase_norm = 0.0; IndexType disp_dof_num(0), lm_dof_num(0), lm_stick_dof_num(0), lm_slip_dof_num(0); // First iterator const auto it_dof_begin = rDofSet.begin(); // The nodes array auto& r_nodes_array = rModelPart.Nodes(); // Auxiliar values std::size_t dof_id = 0; TDataType dof_value = 0.0, dof_incr = 0.0; // The number of active dofs const std::size_t number_active_dofs = rb.size(); // Loop over Dofs #pragma omp parallel for firstprivate(dof_id, dof_value, dof_incr) reduction(+:disp_solution_norm, normal_lm_solution_norm, tangent_lm_slip_solution_norm, tangent_lm_stick_solution_norm, disp_increase_norm, normal_lm_increase_norm, tangent_lm_slip_increase_norm, tangent_lm_stick_increase_norm, disp_dof_num, lm_dof_num, lm_stick_dof_num, lm_slip_dof_num) for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) { auto it_dof = it_dof_begin + i; dof_id = it_dof->EquationId(); // Check dof id is solved if (dof_id < number_active_dofs) { if (mActiveDofs[dof_id]) { dof_value = it_dof->GetSolutionStepValue(0); dof_incr = rDx[dof_id]; const auto& r_curr_var = it_dof->GetVariable(); if 
(r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) { // The normal of the node (TODO: how to solve this without accesing all the time to the database?) const auto it_node = r_nodes_array.find(it_dof->Id()); const double mu = it_node->GetValue(FRICTION_COEFFICIENT); if (mu < std::numeric_limits<double>::epsilon()) { normal_lm_solution_norm += std::pow(dof_value, 2); normal_lm_increase_norm += std::pow(dof_incr, 2); } else { const double normal_x = it_node->FastGetSolutionStepValue(NORMAL_X); const TDataType normal_dof_value = dof_value * normal_x; const TDataType normal_dof_incr = dof_incr * normal_x; normal_lm_solution_norm += std::pow(normal_dof_value, 2); normal_lm_increase_norm += std::pow(normal_dof_incr, 2); if (it_node->Is(SLIP) || mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP)) { tangent_lm_slip_solution_norm += std::pow(dof_value - normal_dof_value, 2); tangent_lm_slip_increase_norm += std::pow(dof_incr - normal_dof_incr, 2); ++lm_slip_dof_num; } else { tangent_lm_stick_solution_norm += std::pow(dof_value - normal_dof_value, 2); tangent_lm_stick_increase_norm += std::pow(dof_incr - normal_dof_incr, 2); ++lm_stick_dof_num; } } ++lm_dof_num; } else if (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) { // The normal of the node (TODO: how to solve this without accesing all the time to the database?) 
const auto it_node = r_nodes_array.find(it_dof->Id()); const double mu = it_node->GetValue(FRICTION_COEFFICIENT); if (mu < std::numeric_limits<double>::epsilon()) { normal_lm_solution_norm += std::pow(dof_value, 2); normal_lm_increase_norm += std::pow(dof_incr, 2); } else { const double normal_y = it_node->FastGetSolutionStepValue(NORMAL_Y); const TDataType normal_dof_value = dof_value * normal_y; const TDataType normal_dof_incr = dof_incr * normal_y; normal_lm_solution_norm += std::pow(normal_dof_value, 2); normal_lm_increase_norm += std::pow(normal_dof_incr, 2); if (it_node->Is(SLIP) || mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP)) { tangent_lm_slip_solution_norm += std::pow(dof_value - normal_dof_value, 2); tangent_lm_slip_increase_norm += std::pow(dof_incr - normal_dof_incr, 2); ++lm_slip_dof_num; } else { tangent_lm_stick_solution_norm += std::pow(dof_value - normal_dof_value, 2); tangent_lm_stick_increase_norm += std::pow(dof_incr - normal_dof_incr, 2); ++lm_stick_dof_num; } } ++lm_dof_num; } else if (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) { // The normal of the node (TODO: how to solve this without accesing all the time to the database?) 
const auto it_node = r_nodes_array.find(it_dof->Id()); const double mu = it_node->GetValue(FRICTION_COEFFICIENT); if (mu < std::numeric_limits<double>::epsilon()) { normal_lm_solution_norm += std::pow(dof_value, 2); normal_lm_increase_norm += std::pow(dof_incr, 2); } else { const double normal_z = it_node->FastGetSolutionStepValue(NORMAL_Z); const TDataType normal_dof_value = dof_value * normal_z; const TDataType normal_dof_incr = dof_incr * normal_z; normal_lm_solution_norm += std::pow(normal_dof_value, 2); normal_lm_increase_norm += std::pow(normal_dof_incr, 2); if (it_node->Is(SLIP) || mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP)) { tangent_lm_slip_solution_norm += std::pow(dof_value - normal_dof_value, 2); tangent_lm_slip_increase_norm += std::pow(dof_incr - normal_dof_incr, 2); ++lm_slip_dof_num; } else { tangent_lm_stick_solution_norm += std::pow(dof_value - normal_dof_value, 2); tangent_lm_stick_increase_norm += std::pow(dof_incr - normal_dof_incr, 2); ++lm_stick_dof_num; } } ++lm_dof_num; } else { // We will assume is displacement dof disp_solution_norm += dof_value * dof_value; disp_increase_norm += dof_incr * dof_incr; ++disp_dof_num; } } } } if(disp_increase_norm < Tolerance) disp_increase_norm = 1.0; if(normal_lm_increase_norm < Tolerance) normal_lm_increase_norm = 1.0; if(tangent_lm_stick_increase_norm < Tolerance) tangent_lm_stick_increase_norm = 1.0; if(tangent_lm_slip_increase_norm < Tolerance) tangent_lm_slip_increase_norm = 1.0; if(disp_solution_norm < Tolerance) disp_solution_norm = 1.0; KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT) && normal_lm_solution_norm < Tolerance) << "WARNING::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl; const TDataType disp_ratio = std::sqrt(disp_increase_norm/disp_solution_norm); const TDataType normal_lm_ratio = normal_lm_solution_norm > Tolerance ? 
std::sqrt(normal_lm_increase_norm/normal_lm_solution_norm) : 0.0; const TDataType tangent_lm_stick_ratio = tangent_lm_stick_solution_norm > Tolerance ? std::sqrt(tangent_lm_stick_increase_norm/tangent_lm_stick_solution_norm) : 0.0; const TDataType tangent_lm_slip_ratio = tangent_lm_slip_solution_norm > Tolerance ? std::sqrt(tangent_lm_slip_increase_norm/tangent_lm_slip_solution_norm) : 0.0; const TDataType disp_abs = std::sqrt(disp_increase_norm)/ static_cast<TDataType>(disp_dof_num); const TDataType normal_lm_abs = std::sqrt(normal_lm_increase_norm)/ static_cast<TDataType>(lm_dof_num); const TDataType tangent_lm_stick_abs = lm_stick_dof_num > 0 ? std::sqrt(tangent_lm_stick_increase_norm)/ static_cast<TDataType>(lm_stick_dof_num) : 0.0; const TDataType tangent_lm_slip_abs = lm_slip_dof_num > 0 ? std::sqrt(tangent_lm_slip_increase_norm)/ static_cast<TDataType>(lm_slip_dof_num) : 0.0; const TDataType normal_tangent_stick_ratio = tangent_lm_stick_abs/normal_lm_abs; const TDataType normal_tangent_slip_ratio = tangent_lm_slip_abs/normal_lm_abs; // We print the results // TODO: Replace for the new log if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { std::cout.precision(4); TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& Table = p_table->GetTable(); Table << disp_ratio << mDispRatioTolerance << disp_abs << mDispAbsTolerance << normal_lm_ratio << mLMNormalRatioTolerance << normal_lm_abs << mLMNormalAbsTolerance << tangent_lm_stick_ratio << mLMTangentStickRatioTolerance << tangent_lm_stick_abs << mLMTangentStickAbsTolerance << tangent_lm_slip_ratio << mLMTangentSlipRatioTolerance << tangent_lm_slip_abs << mLMTangentSlipAbsTolerance; } else { std::cout.precision(4); if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT)) { KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("DoF ONVERGENCE CHECK") << "\tSTEP: " << 
r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT(" NORMAL LAGRANGE MUL:\tRATIO = ") << normal_lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMNormalRatioTolerance << BOLDFONT(" ABS = ") << normal_lm_abs << BOLDFONT(" EXP.ABS = ") << mLMNormalAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT(" STICK LAGRANGE MUL:\tRATIO = ") << tangent_lm_stick_ratio << BOLDFONT(" EXP.RATIO = ") << mLMTangentStickRatioTolerance << BOLDFONT(" ABS = ") << tangent_lm_stick_abs << BOLDFONT(" EXP.ABS = ") << mLMTangentStickAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT(" SLIP LAGRANGE MUL:\tRATIO = ") << tangent_lm_slip_ratio << BOLDFONT(" EXP.RATIO = ") << mLMTangentSlipRatioTolerance << BOLDFONT(" ABS = ") << tangent_lm_slip_abs << BOLDFONT(" EXP.ABS = ") << mLMTangentSlipAbsTolerance << std::endl; } else { KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "DoF ONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "\tDISPLACEMENT: RATIO = " << disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << " NORMAL LAGRANGE MUL:\tRATIO = " << normal_lm_ratio << " EXP.RATIO = " << mLMNormalRatioTolerance << " ABS = " << normal_lm_abs << " EXP.ABS = " << 
mLMNormalAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << " STICK LAGRANGE MUL:\tRATIO = " << tangent_lm_stick_ratio << " EXP.RATIO = " << mLMTangentStickRatioTolerance << " ABS = " << tangent_lm_stick_abs << " EXP.ABS = " << mLMTangentStickAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << " SLIP LAGRANGE MUL:\tRATIO = " << tangent_lm_slip_ratio << " EXP.RATIO = " << mLMTangentSlipRatioTolerance << " ABS = " << tangent_lm_slip_abs << " EXP.ABS = " << mLMTangentSlipAbsTolerance << std::endl; } } } // We check if converged const bool disp_converged = (disp_ratio <= mDispRatioTolerance || disp_abs <= mDispAbsTolerance); const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT) && normal_lm_solution_norm < Tolerance) ? true : (normal_lm_ratio <= mLMNormalRatioTolerance || normal_lm_abs <= mLMNormalAbsTolerance) && (tangent_lm_stick_ratio <= mLMTangentStickRatioTolerance || tangent_lm_stick_abs <= mLMTangentStickAbsTolerance || normal_tangent_stick_ratio <= mNormalTangentRatio) && (tangent_lm_slip_ratio <= mLMTangentSlipRatioTolerance || tangent_lm_slip_abs <= mLMTangentSlipAbsTolerance || normal_tangent_slip_ratio <= mNormalTangentRatio); if (disp_converged && lm_converged) { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FGRN(" Achieved")); else r_table << "Achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl; else 
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "\tDoF convergence is achieved" << std::endl; } } return true; } else { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FRED(" Not achieved")); else r_table << "Not achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "\tDoF convergence is not achieved" << std::endl; } } return false; } } else // In this case all the displacements are imposed! return true; } /** * This function initialize the convergence criteria * @param rModelPart Reference to the ModelPart containing the contact problem. (unused) */ void Initialize( ModelPart& rModelPart ) override { BaseType::mConvergenceCriteriaIsInitialized = true; ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); r_table.AddColumn("DP RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); r_table.AddColumn("N.LM RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP)) { r_table.AddColumn("STI. RATIO", 10); r_table.AddColumn("EXP. 
RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); } r_table.AddColumn("SLIP RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); r_table.AddColumn("CONVERGENCE", 15); mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED, true); } } /** * @brief This function initializes the solution step * @param rModelPart Reference to the ModelPart containing the contact problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) */ void InitializeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // Filling mActiveDofs when MPC exist ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet); } /** * @brief This function finalizes the non-linear iteration * @param rModelPart Reference to the ModelPart containing the problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual + reactions) */ void FinalizeNonLinearIteration( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // Calling base criteria BaseType::FinalizeNonLinearIteration(rModelPart, rDofSet, rA, rDx, rb); // The current process info ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); r_process_info.SetValue(ACTIVE_SET_COMPUTED, false); } ///@} ///@name Operations ///@{ ///@} ///@name Acces ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ Flags mOptions; /// Local flags TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement TDataType mLMNormalRatioTolerance; /// The ratio threshold for the norm of the LM (normal) TDataType mLMNormalAbsTolerance; /// The absolute value threshold for the norm of the LM (normal) TDataType mLMTangentStickRatioTolerance; /// The ratio threshold for the norm of the LM (tangent-stick) TDataType mLMTangentStickAbsTolerance; /// The absolute value threshold for the norm of the LM (tangent-stick) TDataType mLMTangentSlipRatioTolerance; /// The ratio threshold for the norm of the LM (tangent-slip) TDataType mLMTangentSlipAbsTolerance; /// The absolute value threshold for the 
norm of the LM (tangent-slip) TDataType mNormalTangentRatio; /// The ratio to accept a non converged tangent component in case std::vector<bool> mActiveDofs; /// This vector contains the dofs that are active ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@} ///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} ///@name Unaccessible methods ///@{ ///@} }; // Kratos DisplacementLagrangeMultiplierFrictionalContactCriteria ///@name Local flags creation ///@{ /// Local Flags template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::NOT_ENSURE_CONTACT(Kratos::Flags::Create(0, false)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::NOT_PRINTING_OUTPUT(Kratos::Flags::Create(1, false)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::NOT_TABLE_IS_INITIALIZED(Kratos::Flags::Create(2, false)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::PURE_SLIP(Kratos::Flags::Create(3)); template<class TSparseSpace, class 
TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::NOT_PURE_SLIP(Kratos::Flags::Create(3, false)); } #endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_FRICTIONAL_CONTACT_CRITERIA_H */
blake2bp.c
/*
   BLAKE2 reference source code package - optimized C implementations

   Copyright 2012, Samuel Neves <sneves@dei.uc.pt>.  You may use this under the
   terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
   your option.  The terms of these licenses can be found at:

   - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
   - OpenSSL license   : https://www.openssl.org/source/license.html
   - Apache 2.0        : http://www.apache.org/licenses/LICENSE-2.0

   More information about the BLAKE2 hash function can be found at
   https://blake2.net.
*/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#if defined(_OPENMP)
#include <omp.h>
#endif

#include "blake2.h"
#include "blake2-impl.h"

/* BLAKE2bp hashes the input with 4 interleaved BLAKE2b leaf instances and
 * combines the 4 leaf digests with a single root instance (a 2-level tree:
 * P->depth = 2 below). Leaf i consumes input blocks i, i+4, i+8, ... */
#define PARALLELISM_DEGREE 4

/* Initialize leaf instance `S` (leaf index = `offset`) with the BLAKE2bp
 * tree parameters: fanout 4, depth 2, node_depth 0, full-length inner hash. */
BLAKE2_LOCAL_INLINE(int) blake2bp_init_leaf( blake2b_state *S, uint8_t outlen, uint8_t keylen, uint64_t offset )
{
  blake2b_param P[1];
  P->digest_length = outlen;
  P->key_length = keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  P->leaf_length = 0;
  P->node_offset = offset;
  P->node_depth = 0;
  P->inner_length = BLAKE2B_OUTBYTES;
  memset( P->reserved, 0, sizeof( P->reserved ) );
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  return blake2b_init_param( S, P );
}

/* Initialize the root instance: same tree shape as the leaves but
 * node_depth 1 and node_offset 0. */
BLAKE2_LOCAL_INLINE(int) blake2bp_init_root( blake2b_state *S, uint8_t outlen, uint8_t keylen )
{
  blake2b_param P[1];
  P->digest_length = outlen;
  P->key_length = keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  P->leaf_length = 0;
  P->node_offset = 0;
  P->node_depth = 1;
  P->inner_length = BLAKE2B_OUTBYTES;
  memset( P->reserved, 0, sizeof( P->reserved ) );
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  return blake2b_init_param( S, P );
}

/* Initialize an unkeyed BLAKE2bp state. Returns 0 on success, -1 on a bad
 * output length or an init failure of the root/leaf instances. */
int blake2bp_init( blake2bp_state *S, const uint8_t outlen )
{
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;

  if( blake2bp_init_root( S->R, outlen, 0 ) < 0 )
    return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1;

  /* The root and the last leaf carry the last_node flag per the tree mode. */
  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  return 0;
}

/* Initialize a keyed BLAKE2bp state. The key is padded to one full block
 * and absorbed by every leaf, then wiped from the stack. */
int blake2bp_init_key( blake2bp_state *S, const uint8_t outlen, const void *key, const uint8_t keylen )
{
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  if( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;

  if( blake2bp_init_root( S->R, outlen, keylen ) < 0 )
    return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1;

  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  {
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );

    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], block, BLAKE2B_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }
  return 0;
}

/* Absorb `inlen` bytes. Input is processed in groups of 4 blocks: first the
 * internal buffer is flushed once it holds a full group, then each leaf
 * (one per thread under OpenMP, sequential otherwise) consumes its own
 * stride of the remaining full groups. The tail (< 4 blocks) is buffered. */
int blake2bp_update( blake2bp_state *S, const uint8_t *in, uint64_t inlen )
{
  size_t left = S->buflen;
  size_t fill = sizeof( S->buf ) - left;

  /* Complete and distribute the buffered group before streaming. */
  if( left && inlen >= fill )
  {
    memcpy( S->buf + left, in, fill );

    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, BLAKE2B_BLOCKBYTES );

    in += fill;
    inlen -= fill;
    left = 0;
  }

#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
  for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
  {
#if defined(_OPENMP)
    size_t id__ = omp_get_thread_num();
#endif
    /* Leaf id__ starts at block id__ and strides by 4 blocks. */
    uint64_t inlen__ = inlen;
    const uint8_t *in__ = ( const uint8_t * )in;
    in__ += id__ * BLAKE2B_BLOCKBYTES;

    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S->S[id__], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }
  }

  /* Buffer the tail that did not form a complete 4-block group. */
  in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES );
  inlen %= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;

  if( inlen > 0 )
    memcpy( S->buf + left, in, inlen );

  S->buflen = left + inlen;
  return 0;
}

/* Flush each leaf's share of the buffered tail, finalize the leaves, and
 * hash the 4 leaf digests into the root to produce the final digest. */
int blake2bp_final( blake2bp_state *S, uint8_t *out, const uint8_t outlen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    if( S->buflen > i * BLAKE2B_BLOCKBYTES )
    {
      size_t left = S->buflen - i * BLAKE2B_BLOCKBYTES;

      if( left > BLAKE2B_BLOCKBYTES ) left = BLAKE2B_BLOCKBYTES;

      blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, left );
    }

    blake2b_final( S->S[i], hash[i], BLAKE2B_OUTBYTES );
  }

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( S->R, hash[i], BLAKE2B_OUTBYTES );

  return blake2b_final( S->R, out, outlen );
}

/* One-shot keyed/unkeyed BLAKE2bp over a contiguous buffer. Same tree
 * construction as the streaming API, but each leaf can see the whole input
 * and handles its own partial trailing block. Returns 0 on success. */
int blake2bp( uint8_t *out, const void *in, const void *key, uint8_t outlen, uint64_t inlen, uint8_t keylen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
  blake2b_state S[PARALLELISM_DEGREE][1];
  blake2b_state FS[1];

  /* Verify parameters */
  if ( NULL == in && inlen > 0 ) return -1;

  if ( NULL == out ) return -1;

  if( NULL == key && keylen > 0 ) return -1;

  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  if( keylen > BLAKE2B_KEYBYTES ) return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1;

  S[PARALLELISM_DEGREE - 1]->last_node = 1; /* mark last node */

  if( keylen > 0 )
  {
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );

    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S[i], block, BLAKE2B_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }

#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
  for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
  {
#if defined(_OPENMP)
    size_t id__ = omp_get_thread_num();
#endif
    uint64_t inlen__ = inlen;
    const uint8_t *in__ = ( const uint8_t * )in;
    in__ += id__ * BLAKE2B_BLOCKBYTES;

    /* Full 4-block groups: this leaf takes block id__ of each group. */
    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S[id__], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }

    /* Partial final group: this leaf's share may be a short block. */
    if( inlen__ > id__ * BLAKE2B_BLOCKBYTES )
    {
      const size_t left = inlen__ - id__ * BLAKE2B_BLOCKBYTES;
      const size_t len = left <= BLAKE2B_BLOCKBYTES ? left : BLAKE2B_BLOCKBYTES;
      blake2b_update( S[id__], in__, len );
    }

    blake2b_final( S[id__], hash[id__], BLAKE2B_OUTBYTES );
  }

  if( blake2bp_init_root( FS, outlen, keylen ) < 0 )
    return -1;

  FS->last_node = 1; /* Mark as last node */

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( FS, hash[i], BLAKE2B_OUTBYTES );

  return blake2b_final( FS, out, outlen );
}

#if defined(BLAKE2BP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
/* Known-answer self-test: streams prefixes of a fixed pattern through the
 * keyed streaming API and compares against the KAT table. */
int main( int argc, char **argv )
{
  uint8_t key[BLAKE2B_KEYBYTES];
  uint8_t buf[KAT_LENGTH];

  for( size_t i = 0; i < BLAKE2B_KEYBYTES; ++i )
    key[i] = ( uint8_t )i;

  for( size_t i = 0; i < KAT_LENGTH; ++i )
    buf[i] = ( uint8_t )i;

  for( size_t i = 0; i < KAT_LENGTH; ++i )
  {
    uint8_t hash[BLAKE2B_OUTBYTES];
    /*blake2bp( hash, buf, key, BLAKE2B_OUTBYTES, i, BLAKE2B_KEYBYTES ); */
    blake2bp_state S[1];
    blake2bp_init_key( S, BLAKE2B_OUTBYTES, key, BLAKE2B_KEYBYTES );
    blake2bp_update( S, buf, i );
    blake2bp_final( S, hash, BLAKE2B_OUTBYTES );

    if( 0 != memcmp( hash, blake2bp_keyed_kat[i], BLAKE2B_OUTBYTES ) )
    {
      puts( "error" );
      return -1;
    }
  }

  puts( "ok" );
  return 0;
}
#endif
DRACC_OMP_013_Counter_wrong_critical_simd_Intra_yes.c
/* Concurrent access on a counter with the wrong lock, by utilising OpenMP critical directives and simd. Atomicity Violation. Two locks are used to ensure that addition and substraction cannot be interrupted by them selfes on other teams. Although they are able to interrupt eachother leading to a wrong result. Intra Region. Because of the lockstep, the team size and the mapping of threads on the accelerator this code will not be able to cause a Data Race. If the Data Race is induced depends in the implementation of the critical construct on accelerators. */ #include <omp.h> #include <stdio.h> #include <stdbool.h> #define N 10000 #define C 512 int countervar[C]; int init(){ for(int i=0; i<C; i++){ countervar[i]=0; } return 0; } int count(){ #pragma omp target map(tofrom:countervar[0:C]) device(0) #pragma omp teams num_teams(1) #pragma omp distribute parallel for for(int j=0; j<N; j++){ #pragma omp critical(addlock) #pragma omp simd for(int i=0; i<C; i++){ countervar[i]++; } #pragma omp critical(sublock) #pragma omp simd for(int i=0; i<C; i++){ countervar[i] -= 2; } } return 0; } int check(){ bool test = false; for(int i=0; i<C; i++){ if(countervar[i]!= -N){ test = true; } } printf("Memory Access Issue visible: %s\n",test ? "true" : "false"); return 0; } int main(){ init(); count(); check(); return 0; }
advection2d.c
// // acoustics.c // AcousticsNd-CPU // // Created by Manuel Diaz on 7/26/16. // Copyright © 2016 Manuel Diaz. All rights reserved. // #include "advection2d.h" /*******************************/ /* Prints a flattened 1D array */ /*******************************/ void Print2D(REAL *u, const unsigned int nx, const unsigned int ny) { unsigned int i, j; // print a single property on terminal for (j = 0; j < ny; j++) { for (i = 0; i < nx; i++) { printf("%8.2f", u[i+nx*j]); } printf("\n"); } printf("\n"); } /**************************/ /* Write to file 1D array */ /**************************/ void Save2D(REAL *u, const unsigned int nx, const unsigned int ny) { unsigned int i, j; // print result to txt file FILE *pFile = fopen("result.txt", "w"); if (pFile != NULL) { for (j = 0; j < ny; j++) { for (i = 0; i < nx; i++) { //fprintf(pFile, "%d\t %d\t %g\t %g\t %g\n",j,i,u[i+nx*j],v[i+nx*j],p[i+nx*j]); fprintf(pFile, "%g\n",u[i+nx*j]); } } fclose(pFile); } else { printf("Unable to save to file\n"); } } void SaveBinary2D(REAL *u, const unsigned int nx, const unsigned int ny) { /* NOTE: We save our result as float values always! 
* * In Matlab, the results can be loaded by simply doing * fID = fopen('result.bin'); * result = fread(fID,[4,nx*ny],'float')'; * myplot(result,nx,ny); */ float data; unsigned int i, j, k, xy, o; // print result to txt file FILE *pFile = fopen("result.bin", "w"); if (pFile != NULL) { for (j = 0; j < ny; j++) { for (i = 0; i < nx; i++) { o = i+nx*j; // index data = (float)u[o]; fwrite(&data,sizeof(float),1,pFile); } } fclose(pFile); } else { printf("Unable to save to file\n"); } } /***************************/ /* PRESSURE INITIALIZATION */ /***************************/ void Call_Init2d(const int IC, REAL *u0, const REAL dx, const REAL dy, unsigned int nx, unsigned int ny) { unsigned int i, j, o; switch (IC) { case 1: { // A square jump problem for (j= 0; j < ny; j++) { for (i= 0; i < nx; i++) { o = i+nx*j; if (i>0.4*nx && i<0.6*nx && j>0.4*ny && j<0.6*ny) { u0[o]=10.0E1; } else { u0[o]=0.0; } } } // Set Neumann boundary conditions in global domain u0'[0]=0.0; u0'[NX]=0.0; break; } case 2: { // Homogeneous IC for (j= 0; j < ny; j++) { for (i= 0; i < nx; i++) { o = i+nx*j; u0[o]=0.0; } } break; } // here to add another IC } } /***********************/ /* FDM RECONSTRUCTIONS */ /***********************/ REAL FDM_5_Reconstruct1d( const REAL vmm, const REAL vm, const REAL v, const REAL vp, const REAL vpp, const REAL umm, const REAL um, const REAL u, const REAL up, const REAL upp){ // ************************************************************************* // Input: v(i) = [v(i-2) v(i-1) v(i) v(i+1) v(i+2) v(i+3)]; // Output: res = df/dx; // // Based on: // C.W. Shu's Lectures notes on: 'ENO and WENO schemes for Hyperbolic // Conservation Laws' // // coded by Manuel Diaz, 02.10.2012, NTU Taiwan. // ************************************************************************* // // Domain cells (I{i}) reference: // // | | u(i) | | // | u(i-1) |___________| | // |___________| | u(i+1) | // | | |___________| // ...|-----0-----|-----0-----|-----0-----|... 
// | i-1 | i | i+1 | // |- +|- +|- +| // i-3/2 i-1/2 i+1/2 i+3/2 // // ENO stencils (S{r}) reference: // // |___________S2__________| // | | // |___________S1__________| | // | | | using only f^{+} // |___________S0__________| | | // ..|---o---|---o---|---o---|---o---|---o---|... // | I{i-2}| I{i-1}| I{i} | I{i+1}| I{i+2}| // -| // i+1/2 // // |___________S0__________| // | | // | |___________S1__________| using only f^{-} // | | | // | | |___________S2__________| // ..|---o---|---o---|---o---|---o---|---o---|... // | I{i-1}| I{i} | I{i+1}| I{i+2}| I{i+3}| // |+ // i+1/2 // // WENO stencil: S{i} = [ I{i-2},...,I{i+3} ] // ************************************************************************* REAL hn, hp, dflux; // Numerical Flux at cell boundary, $v_{i+1/2}^{-}$; hn = ( 2*vmm - 13*vm + 47*v + 27*vp - 3*vpp)/60; // Numerical Flux at cell boundary, $v_{i+1/2}^{+}$; hp = (-3*umm + 27*um + 47*u - 13*up + 2*upp)/60; // Compute the numerical flux v_{i+1/2} dflux = (hn+hp); return dflux; } /***********************/ /* WENO RECONSTRUCTION */ /***********************/ REAL WENO5_Reconstruct1d( const REAL vmm, const REAL vm, const REAL v, const REAL vp, const REAL vpp, const REAL umm, const REAL um, const REAL u, const REAL up, const REAL upp){ // ************************************************************************* // Input: v(i) = [v(i-2) v(i-1) v(i) v(i+1) v(i+2) v(i+3)]; // Output: res = df/dx; // // Based on: // C.W. Shu's Lectures notes on: 'ENO and WENO schemes for Hyperbolic // Conservation Laws' // // coded by Manuel Diaz, 02.10.2012, NTU Taiwan. // ************************************************************************* // // Domain cells (I{i}) reference: // // | | u(i) | | // | u(i-1) |___________| | // |___________| | u(i+1) | // | | |___________| // ...|-----0-----|-----0-----|-----0-----|... 
// | i-1 | i | i+1 | // |- +|- +|- +| // i-3/2 i-1/2 i+1/2 i+3/2 // // ENO stencils (S{r}) reference: // // |___________S2__________| // | | // |___________S1__________| | // | | | using only f^{+} // |___________S0__________| | | // ..|---o---|---o---|---o---|---o---|---o---|... // | I{i-2}| I{i-1}| I{i} | I{i+1}| I{i+2}| // -| // i+1/2 // // |___________S0__________| // | | // | |___________S1__________| using only f^{-} // | | | // | | |___________S2__________| // ..|---o---|---o---|---o---|---o---|---o---|... // | I{i-1}| I{i} | I{i+1}| I{i+2}| I{i+3}| // |+ // i+1/2 // // WENO stencil: S{i} = [ I{i-2},...,I{i+3} ] // ************************************************************************* REAL B0n, B1n, B2n, B0p, B1p, B2p; REAL w0n, w1n, w2n, w0p, w1p, w2p; REAL a0n, a1n, a2n, a0p, a1p, a2p; REAL alphasumn, alphasump, hn, hp; REAL dflux; // Smooth Indicators (beta factors) B0n = C1312*(vmm-2*vm+v )*(vmm-2*vm+v ) + C14*(vmm-4*vm+3*v)*(vmm-4*vm+3*v); B1n = C1312*(vm -2*v +vp )*(vm -2*v +vp ) + C14*(vm-vp)*(vm-vp); B2n = C1312*(v -2*vp+vpp)*(v -2*vp+vpp) + C14*(3*v-4*vp+vpp)*(3*v-4*vp+vpp); // Alpha weights a0n = D0N/((EPS + B0n)*(EPS + B0n)); a1n = D1N/((EPS + B1n)*(EPS + B1n)); a2n = D2N/((EPS + B2n)*(EPS + B2n)); alphasumn = a0n + a1n + a2n; // ENO stencils weigths w0n = a0n/alphasumn; w1n = a1n/alphasumn; w2n = a2n/alphasumn; // Numerical Flux at cell boundary, $v_{i+1/2}^{-}$; hn = (w0n*(2*vmm- 7*vm + 11*v) + w1n*( -vm + 5*v + 2*vp) + w2n*( 2*v + 5*vp - vpp ))/6; // Smooth Indicators (beta factors) B0p = C1312*(umm-2*um+u )*(umm-2*um +u ) + C14*(umm-4*um+3*u)*(umm-4*um+3*u); B1p = C1312*(um -2*u +up )*(um -2*u +up ) + C14*(um-up)*(um-up); B2p = C1312*(u -2*up+upp)*(u -2*up +upp) + C14*(3*u-4*up+upp)*(3*u-4*up+upp); // Alpha weights a0p = D0P/((EPS + B0p)*(EPS + B0p)); a1p = D1P/((EPS + B1p)*(EPS + B1p)); a2p = D2P/((EPS + B2p)*(EPS + B2p)); alphasump = a0p + a1p + a2p; // ENO stencils weigths w0p = a0p/alphasump; w1p = a1p/alphasump; w2p = a2p/alphasump; // 
Numerical Flux at cell boundary, $v_{i+1/2}^{+}$; hp = (w0p*( -umm + 5*um + 2*u ) + w1p*( 2*um + 5*u - up ) + w2p*(11*u - 7*up + 2*upp))/6; // Compute the numerical flux v_{i+1/2} dflux = (hn+hp); return dflux; } /*****************/ /* Compute dF/dx */ // <==== parallel strategy: compute serialy by rows or by columns! /*****************/ void Compute_Adv_x( REAL *u, REAL *Lu, const REAL alpha, const unsigned int nx, const unsigned int ny, const REAL dx) { // Temporary variables REAL fu, fu_old, fp, fp_old; REAL f1mm, f1m, f1, f1p, f1pp; REAL g1mm, g1m, g1, g1p, g1pp; // Indexes unsigned int i, j, o; #pragma omp parallel shared (u,Lu) private (j,f1mm,f1m,f1,f1p,f1pp,g1mm,g1m,g1,g1p,g1pp,fu,fu_old,fp,fp_old) { #pragma omp for // Nonlinear Advection for (j = 2; j < ny-3; j++) { o=nx*j; // Old resulst arrays fu_old=0; fp_old=0; f1mm= 0.5*(alpha*u[0+o]+alpha*u[0+o]); // node(i-2) f1m = 0.5*(alpha*u[1+o]+alpha*u[1+o]); // node(i-1) f1 = 0.5*(alpha*u[2+o]+alpha*u[2+o]); // node( i ) imm--im--i--ip--ipp f1p = 0.5*(alpha*u[3+o]+alpha*u[3+o]); // node(i+1) g1mm= 0.5*(alpha*u[1+o]-alpha*u[1+o]); // node(i-1) g1m = 0.5*(alpha*u[2+o]-alpha*u[2+o]); // node( i ) im--i--ip--ipp--ippp g1 = 0.5*(alpha*u[3+o]-alpha*u[3+o]); // node(i+1) g1p = 0.5*(alpha*u[4+o]-alpha*u[4+o]); // node(i+2) for (i = 2; i < nx-3; i++) { // Compute and split fluxes f1pp= 0.5*(alpha*u[i+2+o]+alpha*u[i+2+o]); // node(i+2) ipp g1pp= 0.5*(alpha*u[i+3+o]-alpha*u[i+3+o]); // node(i+3) ippp // Reconstruct fu = WENO5_Reconstruct1d(f1mm,f1m,f1,f1p,f1pp,g1mm,g1m,g1,g1p,g1pp); //fu = FDM_5_Reconstruct1d(f1mm,f1m,f1,f1p,f1pp,g1mm,g1m,g1,g1p,g1pp); // Compute Lu = -dF/dx Lu[i+o]=-(fu-fu_old)/dx; // -dudx // Save old results fu_old=fu; f1mm= f1m; // node(i-2) f1m = f1; // node(i-1) f1 = f1p; // node( i ) imm--im--i--ip--ipp f1p = f1pp; // node(i+1) g1mm= g1m; // node(i-1) g1m = g1; // node( i ) im--i--ip--ipp--ippp g1 = g1p; // node(i+1) g1p = g1pp; // node(i+2) } } } } /*****************/ /* Compute dG/dx */ // <==== 
parallel strategy: compute serialy by rows or by columns! /*****************/ void Compute_Adv_y( REAL *u, REAL *Lu, const REAL alpha, const unsigned int nx, const unsigned int ny, const REAL dy) { // Temporary variables REAL fu, fu_old, fp, fp_old; REAL f1mm, f1m, f1, f1p, f1pp; REAL g1mm, g1m, g1, g1p, g1pp; // Indexes unsigned int i, j; #pragma omp parallel shared (u,Lu) private (i,f1mm,f1m,f1,f1p,f1pp,g1mm,g1m,g1,g1p,g1pp,fu,fu_old,fp,fp_old) { #pragma omp for // Nonlinear Advection for (i = 2; i < nx-3; i++) { // Old resulst arrays fu_old=0; fp_old=0; f1mm= 0.5*(alpha*u[i+nx*0]+alpha*u[i+nx*0]); // node(i-2) f1m = 0.5*(alpha*u[i+nx*1]+alpha*u[i+nx*1]); // node(i-1) f1 = 0.5*(alpha*u[i+nx*2]+alpha*u[i+nx*2]); // node( i ) imm--im--i--ip--ipp f1p = 0.5*(alpha*u[i+nx*3]+alpha*u[i+nx*3]); // node(i+1) g1mm= 0.5*(alpha*u[i+nx*1]-alpha*u[i+nx*1]); // node(i-1) g1m = 0.5*(alpha*u[i+nx*2]-alpha*u[i+nx*2]); // node( i ) im--i--ip--ipp--ippp g1 = 0.5*(alpha*u[i+nx*3]-alpha*u[i+nx*3]); // node(i+1) g1p = 0.5*(alpha*u[i+nx*4]-alpha*u[i+nx*4]); // node(i+2) for (j = 2; j < ny-3; j++) { // Compute and split fluxes f1pp= 0.5*(alpha*u[i+nx*(j+2)]+alpha*u[i+nx*(j+2)]); // node(i+2) g1pp= 0.5*(alpha*u[i+nx*(j+3)]-alpha*u[i+nx*(j+3)]); // node(i+3) // Reconstruct fu = WENO5_Reconstruct1d(f1mm,f1m,f1,f1p,f1pp,g1mm,g1m,g1,g1p,g1pp); //fu = FDM_5_Reconstruct1d(f1mm,f1m,f1,f1p,f1pp,g1mm,g1m,g1,g1p,g1pp); // Compute Lv = -dG/dy Lu[i+nx*j]-=(fu-fu_old)/dy; // -dudy // Save old results fu_old=fu; f1mm= f1m; // node(i-2) f1m = f1; // node(i-1) f1 = f1p; // node( i ) imm--im--i--ip--ipp f1p = f1pp; // node(i+1) g1mm= g1m; // node(i-1) g1m = g1; // node( i ) im--i--ip--ipp--ippp g1 = g1p; // node(i+1) g1p = g1pp; // node(i+2) } } } } /******************************/ /* Cartesian Laplace Operator */ /******************************/ void Compute_Diff_( REAL *u, REAL *Lu, const REAL diff, const unsigned int nx, const unsigned int ny) { // Using (i,j,k) = [i+N*j+M*N*k] indexes unsigned int i, 
j, o, n, s, e, w, nn, ss, ee, ww; for (j = 0; j < ny; j++) { for (i = 0; i < nx; i++) { o = i+nx*j; // node( j,i ) nn nn= o+nx+nx;// node(j+2,i) | n = o+nx; // node(j+1,i) n s = o-nx; // node(j-1,i) | ss= o-nx-nx;// node(j-2,i) ww--w--o--e--ee ee= o+2; // node(j,i+2) | e = o+1; // node(j,i+1) s w = o-1; // node(j,i-1) | ww= o-2; // node(j,i-2) ss if (i>1 && i<nx-2 && j>1 && j<ny-2) Lu[o] += diff*(-u[ee]-u[nn]+16*u[n]+16*u[e]-60*u[o]+16*u[w]+16*u[s]-u[ss]-u[ww]); } } } /***********************/ /* Runge Kutta Methods */ // <==== this is perfectly parallel! /***********************/ void Compute_sspRK( REAL *u, const REAL *uo, const REAL *Lu, const int step, const unsigned int nx, const unsigned int ny, const REAL dt) { unsigned int i, j, o; // Compute Runge-Kutta step for (j = 0; j < ny; j++) { for (i = 0; i < nx; i++) { // compute single index o=i+nx*j; // update only internal cells if (i>2 && i<nx-3 && j>2 && j<ny-3) { switch (step) { case 1: // step 1 u[o] = uo[o]+dt*Lu[o]; break; case 2: // step 2 u[o] = 0.75*uo[o]+0.25*(u[o]+dt*Lu[o]); break; case 3: // step 3 u[o] = (uo[o]+2*(u[o]+dt*Lu[o]))/3; break; } } //else do nothing! 
} } } /******************/ /* COMPUTE GFLOPS */ /******************/ float CalcGflops(float computeTimeInSeconds, unsigned int iterations, unsigned int nx, unsigned int ny) { return iterations*(double)((nx*ny) * 1e-9 * FLOPS)/computeTimeInSeconds; } /***********************/ /* COMPUTE ERROR NORMS */ /***********************/ void CalcError(REAL *u, const REAL t, const REAL dx, const REAL dy, unsigned int nx, unsigned int ny) { unsigned int i, j, o, xy; REAL err = 0., l1_norm = 0., l2_norm = 0., linf_norm = 0.; for (j = 0; j < ny; j++) { for (i = 0; i < nx; i++) { //err = (exp(-2*M_PI*M_PI*t)*SINE_DISTRIBUTION(i,j,dx,dy)) - u[i+nx*j]; err = ((0.1/t)*EXP_DISTRIBUTION(i,j,dx,dy,1.0,t)) - u[i]; l1_norm += fabs(err); l2_norm += err*err; linf_norm = fmax(linf_norm,fabs(err)); } } printf("L1 norm : %e\n", dx*dy*l1_norm); printf("L2 norm : %e\n", sqrt(dx*dy*l2_norm)); printf("Linf norm : %e\n", linf_norm); } /*****************/ /* PRINT SUMMARY */ /*****************/ void PrintSummary(const char* kernelName, const char* optimization, REAL computeTimeInSeconds, float gflops, REAL outputTimeInSeconds, const int computeIterations, const int nx, const int ny) { printf("=========================== %s =======================\n", kernelName); printf("Optimization : %s\n", optimization); printf("Kernel time ex. data transfers : %lf seconds\n", computeTimeInSeconds); printf("===================================================================\n"); printf("Total effective GFLOPs : %lf\n", gflops); printf("===================================================================\n"); printf("2D Grid Size : %d x %d \n", nx,ny); printf("Iterations : %d\n", computeIterations); printf("Final Time : %g\n", outputTimeInSeconds); }
GB_unaryop__lnot_uint64_bool.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_uint64_bool
// op(A') function:  GB_tran__lnot_uint64_bool

// C type:   uint64_t
// A type:   bool
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (logical NOT)
#define GB_OP(z, x)   \
    z = !(x != 0) ;

// casting from the A type to the C type
#define GB_CASTING(z, aij)   \
    uint64_t z = (uint64_t) aij ;

// cij = op (cast (aij)): the full get/cast/apply sequence for one entry
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply over the anz entries, statically scheduled across
// nthreads OpenMP threads.  Cx and Ax may be aliased (in-place apply).
GrB_Info GB_unop__lnot_uint64_bool
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    bool *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unaryop_transpose.c, specialized
// here via the GB_* macros defined above (standard pattern in Generated/).
GrB_Info GB_tran__lnot_uint64_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
YeeSolver.h
#ifndef PICA_YEESOLVER_H
#define PICA_YEESOLVER_H

#include "pica/grid/YeeGrid.h"
#include "pica/threading/OpenMPHelper.h"

namespace pica {

/// Explicit FDTD field solver on a staggered Yee grid.
/// updateE advances E by one step of -4*pi*dt*J plus the c*dt-scaled curl of
/// B; updateB advances B by the c*dt-scaled curl of E (Gaussian units,
/// judging from the 4*pi current term and Constants<>::c()).  The actual
/// stencils are dispatched by dimension to the Implementation specializations
/// below; the primary template is intentionally empty.
class YeeSolver {
public:

    template<Dimension dimension, typename Real>
    void updateE(YeeGrid<dimension, Real>& grid, Real dt);

    template<Dimension dimension, typename Real>
    void updateB(YeeGrid<dimension, Real>& grid, Real dt);

private:

    // Dimension-specific stencils; only the One/Two/Three specializations
    // have members, so an unsupported dimension fails to compile.
    template<Dimension dimension, typename Real>
    struct Implementation {};
};

template<Dimension dimension, typename Real>
void YeeSolver::updateE(YeeGrid<dimension, Real>& grid, Real dt)
{
    Implementation<dimension, Real>::updateE(grid, dt);
}

template<Dimension dimension, typename Real>
void YeeSolver::updateB(YeeGrid<dimension, Real>& grid, Real dt)
{
    Implementation<dimension, Real>::updateB(grid, dt);
}

/// 1D specialization: fields vary along x only.
template<typename Real>
struct YeeSolver::Implementation<One, Real> {

    static void updateE(YeeGrid<One, Real>& grid, Real dt)
    {
        typedef typename YeeGrid<One, Real>::ValueType ValueType;
        // -4*pi*dt factor applied to the current density J.
        const ValueType coeffCurrent = -static_cast<ValueType>(4) * Constants<ValueType>::pi() * dt;
        typedef typename YeeGrid<One, Real>::PositionType PositionType;
        const ValueType cdt = Constants<ValueType>::c() * dt;
        // Per-axis c*dt/step factors for the finite-difference curl.
        const PositionType coeff = PositionType(cdt) / grid.getStep();
        typedef typename YeeGrid<One, Real>::IndexType IndexType;
        // Stop one cell early: the stencil reads the i+1 neighbor.
        const IndexType begin = 0;
        const IndexType end = grid.getSize() - IndexType(1);
        #pragma omp parallel for
        for (int i = begin.x; i < end.x; i++) {
            grid.ex(i) += coeffCurrent * grid.jx(i);
            grid.ey(i) += coeffCurrent * grid.jy(i) - coeff.x * (grid.bz(i + 1) - grid.bz(i));
            grid.ez(i) += coeffCurrent * grid.jz(i) + coeff.x * (grid.by(i + 1) - grid.by(i));
        }
    }

    static void updateB(YeeGrid<One, Real>& grid, Real dt)
    {
        typedef typename YeeGrid<One, Real>::ValueType ValueType;
        typedef typename YeeGrid<One, Real>::PositionType PositionType;
        const ValueType cdt = Constants<ValueType>::c() * dt;
        const PositionType coeff = PositionType(cdt) / grid.getStep();
        typedef typename YeeGrid<One, Real>::IndexType IndexType;
        // Start at 1: the stencil reads the i-1 neighbor.
        const IndexType begin = IndexType(1);
        const IndexType end = grid.getSize();
        #pragma omp parallel for
        for (int i = begin.x; i < end.x; i++) {
            grid.by(i) += coeff.x * (grid.ez(i) - grid.ez(i - 1));
            grid.bz(i) += -coeff.x * (grid.ey(i) - grid.ey(i - 1));
        }
    }
};

/// 2D specialization: interior bulk loop plus explicit edge rows/columns
/// for components whose stencil still fits at the domain boundary.
template<typename Real>
struct YeeSolver::Implementation<Two, Real> {

    static void updateE(YeeGrid<Two, Real>& grid, Real dt)
    {
        typedef typename YeeGrid<Two, Real>::ValueType ValueType;
        const ValueType coeffCurrent = -static_cast<ValueType>(4) * Constants<ValueType>::pi() * dt;
        typedef typename YeeGrid<Two, Real>::PositionType PositionType;
        const ValueType cdt = Constants<ValueType>::c() * dt;
        const PositionType coeff = PositionType(cdt, cdt) / grid.getStep();
        typedef typename YeeGrid<Two, Real>::IndexType IndexType;
        const IndexType begin(0, 0);
        const IndexType end = grid.getSize() - IndexType(1, 1);
        // Only the outer i loop is parallelized; each thread owns whole rows.
        #pragma omp parallel for
        for (int i = begin.x; i < end.x; i++)
        for (int j = begin.y; j < end.y; j++) {
            grid.ex(i, j) += coeffCurrent * grid.jx(i, j) +
                coeff.y * (grid.bz(i, j + 1) - grid.bz(i, j));
            grid.ey(i, j) += coeffCurrent * grid.jy(i, j) -
                coeff.x * (grid.bz(i + 1, j) - grid.bz(i, j));
            grid.ez(i, j) += coeffCurrent * grid.jz(i, j) +
                coeff.x * (grid.by(i + 1, j) - grid.by(i, j)) -
                coeff.y * (grid.bx(i, j + 1) - grid.bx(i, j));
        }
        // Edges: ey needs only the x-neighbor, so row j==end.y can still be
        // updated; likewise ex on column i==end.x (serial loops).
        for (int i = begin.x; i < end.x; i++)
            grid.ey(i, end.y) += coeffCurrent * grid.jy(i, end.y) -
                coeff.x * (grid.bz(i + 1, end.y) - grid.bz(i, end.y));
        for (int j = begin.y; j < end.y; j++)
            grid.ex(end.x, j) += coeffCurrent * grid.jx(end.x, j) +
                coeff.y * (grid.bz(end.x, j + 1) - grid.bz(end.x, j));
    }

    static void updateB(YeeGrid<Two, Real>& grid, Real dt)
    {
        typedef typename YeeGrid<Two, Real>::ValueType ValueType;
        typedef typename YeeGrid<Two, Real>::PositionType PositionType;
        const ValueType cdt = Constants<ValueType>::c() * dt;
        const PositionType coeff = PositionType(cdt, cdt) / grid.getStep();
        typedef typename YeeGrid<Two, Real>::IndexType IndexType;
        const IndexType begin(1, 1);
        const IndexType end = grid.getSize();
        #pragma omp parallel for
        for (int i = begin.x; i < end.x; i++)
        for (int j = begin.y; j < end.y; j++) {
            grid.bx(i, j) += -coeff.y * (grid.ez(i, j) - grid.ez(i, j - 1));
            grid.by(i, j) += coeff.x * (grid.ez(i, j) - grid.ez(i - 1, j));
            grid.bz(i, j) += coeff.y * (grid.ex(i, j) - grid.ex(i, j - 1)) -
                coeff.x * (grid.ey(i, j) - grid.ey(i - 1, j));
        }
        // Edges: by on row j==0 and bx on column i==0 need only one
        // backward neighbor along the other axis.
        for (int i = begin.x; i < end.x; i++)
            grid.by(i, 0) += coeff.x * (grid.ez(i, 0) - grid.ez(i - 1, 0));
        for (int j = begin.y; j < end.y; j++)
            grid.bx(0, j) += -coeff.y * (grid.ez(0, j) - grid.ez(0, j - 1));
    }
};

/// 3D specialization: full Yee curl stencils; the bulk loops collapse the
/// two outer dimensions for OpenMP, and three face loops patch the
/// components that remain updatable on the domain faces.
template<typename Real>
struct YeeSolver::Implementation<Three, Real> {

    static void updateE(YeeGrid<Three, Real>& grid, Real dt)
    {
        typedef typename YeeGrid<Three, Real>::ValueType ValueType;
        const ValueType coeffCurrent = -static_cast<ValueType>(4) * Constants<ValueType>::pi() * dt;
        typedef typename YeeGrid<Three, Real>::PositionType PositionType;
        const ValueType cdt = Constants<ValueType>::c() * dt;
        const PositionType coeff = PositionType(cdt, cdt, cdt) / grid.getStep();
        typedef typename YeeGrid<Three, Real>::IndexType IndexType;
        const IndexType begin(0, 0, 0);
        const IndexType end = grid.getSize() - IndexType(1, 1, 1);
        #pragma omp parallel for collapse(2)
        for (int i = begin.x; i < end.x; i++)
        for (int j = begin.y; j < end.y; j++)
        for (int k = begin.z; k < end.z; k++) {
            grid.ex(i, j, k) += coeffCurrent * grid.jx(i, j, k) +
                coeff.y * (grid.bz(i, j + 1, k) - grid.bz(i, j, k)) -
                coeff.z * (grid.by(i, j, k + 1) - grid.by(i, j, k));
            grid.ey(i, j, k) += coeffCurrent * grid.jy(i, j, k) +
                coeff.z * (grid.bx(i, j, k + 1) - grid.bx(i, j, k)) -
                coeff.x * (grid.bz(i + 1, j, k) - grid.bz(i, j, k));
            grid.ez(i, j, k) += coeffCurrent * grid.jz(i, j, k) +
                coeff.x * (grid.by(i + 1, j, k) - grid.by(i, j, k)) -
                coeff.y * (grid.bx(i, j + 1, k) - grid.bx(i, j, k));
        }
        // Edges (domain faces): one component per face can still be updated.
        #pragma omp parallel for
        for (int i = begin.x; i < end.x; i++)
        for (int j = begin.y; j < end.y; j++)
            grid.ez(i, j, end.z) += coeffCurrent * grid.jz(i, j, end.z) +
                coeff.x * (grid.by(i + 1, j, end.z) - grid.by(i, j, end.z)) -
                coeff.y * (grid.bx(i, j + 1, end.z) - grid.bx(i, j, end.z));
        #pragma omp parallel for
        for (int i = begin.x; i < end.x; i++)
        for (int k = begin.z; k < end.z; k++)
            grid.ey(i, end.y, k) += coeffCurrent * grid.jy(i, end.y, k) +
                coeff.z * (grid.bx(i, end.y, k + 1) - grid.bx(i, end.y, k)) -
                coeff.x * (grid.bz(i + 1, end.y, k) - grid.bz(i, end.y, k));
        #pragma omp parallel for
        for (int j = begin.y; j < end.y; j++)
        for (int k = begin.z; k < end.z; k++)
            grid.ex(end.x, j, k) += coeffCurrent * grid.jx(end.x, j, k) +
                coeff.y * (grid.bz(end.x, j + 1, k) - grid.bz(end.x, j, k)) -
                coeff.z * (grid.by(end.x, j, k + 1) - grid.by(end.x, j, k));
    }

    static void updateB(YeeGrid<Three, Real>& grid, Real dt)
    {
        typedef typename YeeGrid<Three, Real>::ValueType ValueType;
        typedef typename YeeGrid<Three, Real>::PositionType PositionType;
        const ValueType cdt = Constants<ValueType>::c() * dt;
        const PositionType coeff = PositionType(cdt, cdt, cdt) / grid.getStep();
        typedef typename YeeGrid<Three, Real>::IndexType IndexType;
        const IndexType begin(1, 1, 1);
        const IndexType end = grid.getSize();
        #pragma omp parallel for collapse(2)
        for (int i = begin.x; i < end.x; i++)
        for (int j = begin.y; j < end.y; j++)
        for (int k = begin.z; k < end.z; k++) {
            grid.bx(i, j, k) += coeff.z * (grid.ey(i, j, k) - grid.ey(i, j, k - 1)) -
                coeff.y * (grid.ez(i, j, k) - grid.ez(i, j - 1, k));
            grid.by(i, j, k) += coeff.x * (grid.ez(i, j, k) - grid.ez(i - 1, j, k)) -
                coeff.z * (grid.ex(i, j, k) - grid.ex(i, j, k - 1));
            grid.bz(i, j, k) += coeff.y * (grid.ex(i, j, k) - grid.ex(i, j - 1, k)) -
                coeff.x * (grid.ey(i, j, k) - grid.ey(i - 1, j, k));
        }
        // Edges (domain faces): mirror of the updateE face handling.
        #pragma omp parallel for
        for (int i = begin.x; i < end.x; i++)
        for (int j = begin.y; j < end.y; j++)
            grid.bz(i, j, 0) += coeff.y * (grid.ex(i, j, 0) - grid.ex(i, j - 1, 0)) -
                coeff.x * (grid.ey(i, j, 0) - grid.ey(i - 1, j, 0));
        #pragma omp parallel for
        for (int i = begin.x; i < end.x; i++)
        for (int k = begin.z; k < end.z; k++)
            grid.by(i, 0, k) += coeff.x * (grid.ez(i, 0, k) - grid.ez(i - 1, 0, k)) -
                coeff.z * (grid.ex(i, 0, k) - grid.ex(i, 0, k - 1));
        #pragma omp parallel for
        for (int j = begin.y; j < end.y; j++)
        for (int k = begin.z; k < end.z; k++)
            grid.bx(0, j, k) += coeff.z * (grid.ey(0, j, k) - grid.ey(0, j, k - 1)) -
                coeff.y * (grid.ez(0, j, k) - grid.ez(0, j - 1, k));
    }
};

} // namespace pica

#endif
ZQ_CNN_MTCNN_Interface.h
#ifndef _ZQ_CNN_MTCNN_INTERFACE_H_ #define _ZQ_CNN_MTCNN_INTERFACE_H_ #pragma once #include "ZQ_CNN_Net_Interface.h" #include "ZQ_CNN_Tensor4D_Interface.h" #include "ZQ_CNN_BBoxUtils.h" #include <omp.h> namespace ZQ { template<class ZQ_CNN_Net_Interface, class ZQ_CNN_Tensor4D_Interface, class ZQ_CNN_Tensor4D_Interface_Base> class ZQ_CNN_MTCNN_Interface { public: using string = std::string; ZQ_CNN_MTCNN_Interface() { min_size = 60; thresh[0] = 0.6; thresh[1] = 0.7; thresh[2] = 0.7; nms_thresh[0] = 0.6; nms_thresh[1] = 0.7; nms_thresh[2] = 0.7; width = 0; height = 0; factor = 0.709; pnet_overlap_thresh_count = 4; pnet_size = 12; pnet_stride = 2; special_handle_very_big_face = false; force_run_pnet_multithread = false; show_debug_info = false; limit_r_num = 0; limit_o_num = 0; limit_l_num = 0; } ~ZQ_CNN_MTCNN_Interface() { } private: #if __ARM_NEON const int BATCH_SIZE = 16; #else const int BATCH_SIZE = 64; #endif std::vector<ZQ_CNN_Net_Interface> pnet, rnet, onet, lnet; bool has_lnet; int thread_num; float thresh[3], nms_thresh[3]; int min_size; int width, height; float factor; int pnet_overlap_thresh_count; int pnet_size; int pnet_stride; int rnet_size; int onet_size; int lnet_size; bool special_handle_very_big_face; bool do_landmark; float early_accept_thresh; float nms_thresh_per_scale; bool force_run_pnet_multithread; std::vector<float> scales; std::vector<ZQ_CNN_Tensor4D_Interface> pnet_images; ZQ_CNN_Tensor4D_Interface ori_input, rnet_image, onet_image; bool show_debug_info; int limit_r_num; int limit_o_num; int limit_l_num; public: void TurnOnShowDebugInfo() { show_debug_info = true; } void TurnOffShowDebugInfo() { show_debug_info = false; } void SetLimit(int limit_r = 0, int limit_o = 0, int limit_l = 0) { limit_r_num = limit_r; limit_o_num = limit_o; limit_l_num = limit_l; } bool Init(const string& pnet_param, const string& pnet_model, const string& rnet_param, const string& rnet_model, const string& onet_param, const string& onet_model, int thread_num = 1, 
bool has_lnet = false, const string& lnet_param = "", const std::string& lnet_model = "") { if (thread_num < 1) force_run_pnet_multithread = true; else force_run_pnet_multithread = false; thread_num = __max(1, thread_num); pnet.resize(thread_num); rnet.resize(thread_num); onet.resize(thread_num); this->has_lnet = has_lnet; if (has_lnet) { lnet.resize(thread_num); } bool ret = true; for (int i = 0; i < thread_num; i++) { ret = pnet[i].LoadFrom(pnet_param, pnet_model, true, 1e-9, true) && rnet[i].LoadFrom(rnet_param, rnet_model, true, 1e-9, true) && onet[i].LoadFrom(onet_param, onet_model, true, 1e-9, true); if (has_lnet && ret) ret = lnet[i].LoadFrom(lnet_param, lnet_model, true, 1e-9, true); if (!ret) break; } if (!ret) { pnet.clear(); rnet.clear(); onet.clear(); if (has_lnet) lnet.clear(); this->thread_num = 0; } else this->thread_num = thread_num; if (show_debug_info) { printf("rnet = %.2f M, onet = %.2f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0), onet[0].GetNumOfMulAdd() / (1024.0*1024.0)); if (has_lnet) printf("lnet = %.2f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0)); } int C, H, W; rnet[0].GetInputDim(C, H, W); rnet_size = H; onet[0].GetInputDim(C, H, W); onet_size = H; if (has_lnet) { lnet[0].GetInputDim(C, H, W); lnet_size = H; } return ret; } void SetPara(int w, int h, int min_face_size = 60, float pthresh = 0.6, float rthresh = 0.7, float othresh = 0.7, float nms_pthresh = 0.6, float nms_rthresh = 0.7, float nms_othresh = 0.7, float scale_factor = 0.709, int pnet_overlap_thresh_count = 4, int pnet_size = 12, int pnet_stride = 2, bool special_handle_very_big_face = false, bool do_landmark = true, float early_accept_thresh = 1.00) { min_size = __max(pnet_size, min_face_size); thresh[0] = __max(0.1, pthresh); thresh[1] = __max(0.1, rthresh); thresh[2] = __max(0.1, othresh); nms_thresh[0] = __max(0.1, nms_pthresh); nms_thresh[1] = __max(0.1, nms_rthresh); nms_thresh[2] = __max(0.1, nms_othresh); scale_factor = __max(0.5, __min(0.97, scale_factor)); 
this->pnet_overlap_thresh_count = __max(0, pnet_overlap_thresh_count); this->pnet_size = pnet_size; this->pnet_stride = pnet_stride; this->special_handle_very_big_face = special_handle_very_big_face; this->do_landmark = do_landmark; this->early_accept_thresh = early_accept_thresh; if (pnet_size == 20 && pnet_stride == 4) nms_thresh_per_scale = 0.45; else nms_thresh_per_scale = 0.495; if (width != w || height != h || factor != scale_factor) { scales.clear(); pnet_images.clear(); width = w; height = h; float minside = __min(width, height); int MIN_DET_SIZE = pnet_size; float m = (float)MIN_DET_SIZE / min_size; minside *= m; while (minside > MIN_DET_SIZE) { scales.push_back(m); minside *= factor; m *= factor; } minside = __min(width, height); int count = scales.size(); for (int i = scales.size() - 1; i >= 0; i--) { if (ceil(scales[i] * minside) <= pnet_size) { count--; } } if (special_handle_very_big_face) { if (count > 2) count--; scales.resize(count); if (count > 0) { float last_size = ceil(scales[count - 1] * minside); for (int tmp_size = last_size - 1; tmp_size >= pnet_size + 1; tmp_size -= 2) { scales.push_back((float)tmp_size / minside); count++; } } scales.push_back((float)pnet_size / minside); count++; } else { scales.push_back((float)pnet_size / minside); count++; } pnet_images.resize(count); } } bool Find(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& results) { double t1 = omp_get_wtime(); if (width != _width || height != _height) return false; if (!ori_input.ConvertFromBGR(bgr_img, width, height, _widthStep)) return false; double t2 = omp_get_wtime(); if (show_debug_info) printf("convert cost: %.3f ms\n", 1000 * (t2 - t1)); return Find(ori_input, results); } bool Find106(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox106>& results) { double t1 = omp_get_wtime(); if (width != _width || height != _height) return false; if (!ori_input.ConvertFromBGR(bgr_img, 
width, height, _widthStep))
    return false;
double t2 = omp_get_wtime();
if (show_debug_info)
    printf("convert cost: %.3f ms\n", 1000 * (t2 - t1));
return Find106(ori_input, results);
}

// Run the full cascade on an already-converted tensor. Stages:
// PNet proposals -> optional cap -> RNet refine -> optional cap -> ONet
// (-> LNet landmark refinement when available and do_landmark is set).
bool Find(ZQ_CNN_Tensor4D_Interface& input, std::vector<ZQ_CNN_BBox>& results)
{
    double t1 = omp_get_wtime();
    std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox;
    if (!_Pnet_stage(input, firstBbox))
        return false;
    //results = firstBbox;
    //return true;
    if (limit_r_num > 0)
    {
        // Keep only the best limit_r_num candidates before RNet.
        _select(firstBbox, limit_r_num, input.GetW(), input.GetH());
    }
    double t2 = omp_get_wtime();
    if (!_Rnet_stage(input, firstBbox, secondBbox))
        return false;
    //results = secondBbox;
    //return true;
    if (limit_o_num > 0)
    {
        _select(secondBbox, limit_o_num, input.GetW(), input.GetH());
    }
    if (!has_lnet || !do_landmark)
    {
        // No LNet: ONet output is the final result.
        double t3 = omp_get_wtime();
        if (!_Onet_stage(input, secondBbox, results))
            return false;
        double t4 = omp_get_wtime();
        if (show_debug_info)
        {
            printf("final found num: %d\n", (int)results.size());
            printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms)\n",
                1000 * (t4 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3));
        }
    }
    else
    {
        double t3 = omp_get_wtime();
        if (!_Onet_stage(input, secondBbox, thirdBbox))
            return false;
        if (limit_l_num > 0)
        {
            _select(thirdBbox, limit_l_num, input.GetW(), input.GetH());
        }
        double t4 = omp_get_wtime();
        if (!_Lnet_stage(input, thirdBbox, results))
            return false;
        double t5 = omp_get_wtime();
        if (show_debug_info)
        {
            printf("final found num: %d\n", (int)results.size());
            printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n",
                1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4));
        }
    }
    return true;
}

// 106-landmark variant: LNet is mandatory (fails when absent or landmarks off).
bool Find106(ZQ_CNN_Tensor4D_Interface& input, std::vector<ZQ_CNN_BBox106>& results)
{
    double t1 = omp_get_wtime();
    std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox;
    if (!_Pnet_stage(input, firstBbox))
        return false;
    //results = firstBbox;
    //return true;
    if (limit_r_num > 0)
    {
        _select(firstBbox,
            limit_r_num, input.GetW(), input.GetH());
}
double t2 = omp_get_wtime();
if (!_Rnet_stage(input, firstBbox, secondBbox))
    return false;
//results = secondBbox;
//return true;
if (limit_o_num > 0)
{
    _select(secondBbox, limit_o_num, input.GetW(), input.GetH());
}
if (!has_lnet || !do_landmark)
{
    // 106-point output requires LNet; bail out when it is unavailable.
    return false;
}
double t3 = omp_get_wtime();
if (!_Onet_stage(input, secondBbox, thirdBbox))
    return false;
if (limit_l_num > 0)
{
    _select(thirdBbox, limit_l_num, input.GetW(), input.GetH());
}
double t4 = omp_get_wtime();
if (!_Lnet106_stage(input, thirdBbox, results))
    return false;
double t5 = omp_get_wtime();
if (show_debug_info)
{
    printf("final found num: %d\n", (int)results.size());
    printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n",
        1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4));
}
return true;
}

private:
// Run PNet sequentially over every pyramid scale and collect the face-score
// map (channel 1 of "prob1") of each scale into maps[i] (mapH[i] x mapW[i]).
// NOTE(review): the second/third loops index scales[i]/pnet_images[i] with the
// compacted index; this assumes no scale was skipped by the size check above —
// appears to hold for pyramids built by SetPara, but verify for edge cases.
void _compute_Pnet_single_thread(ZQ_CNN_Tensor4D_Interface& input, std::vector<std::vector<float> >& maps,
    std::vector<int>& mapH, std::vector<int>& mapW)
{
    int scale_num = 0;
    for (int i = 0; i < scales.size(); i++)
    {
        int changedH = (int)ceil(height*scales[i]);
        int changedW = (int)ceil(width*scales[i]);
        if (changedH < pnet_size || changedW < pnet_size)
            continue;
        scale_num++;
        // Score-map dimensions implied by the PNet window/stride.
        mapH.push_back((changedH - pnet_size) / pnet_stride + 1);
        mapW.push_back((changedW - pnet_size) / pnet_stride + 1);
    }
    maps.resize(scale_num);
    for (int i = 0; i < scale_num; i++)
    {
        maps[i].resize(mapH[i] * mapW[i]);
    }
    for (int i = 0; i < scale_num; i++)
    {
        int changedH = (int)ceil(height*scales[i]);
        int changedW = (int)ceil(width*scales[i]);
        float cur_scale_x = (float)width / changedW;
        float cur_scale_y = (float)height / changedH;
        double t10 = omp_get_wtime();
        if (scales[i] != 1)
        {
            input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
        }
        double t11 = omp_get_wtime();
        // Scale 1 forwards the original input directly (no resize needed).
        if (scales[i] != 1)
            pnet[0].Forward(pnet_images[i]);
        else
            pnet[0].Forward(input);
        double t12 = omp_get_wtime();
        if (show_debug_info)
            printf("Pnet [%d]: resolution [%dx%d], resize:%.3f ms, cost:%.3f ms\n",
                i, changedW, changedH, 1000 * (t11 - t10), 1000 * (t12 - t11));
        const ZQ_CNN_Tensor4D_Interface_Base* score = pnet[0].GetBlobByName("prob1");
        // score map: read channel 1 (face probability) of each pixel.
        int scoreH = score->GetH();
        int scoreW = score->GetW();
        int scorePixStep = score->GetPixelStep();
        const float *p = score->GetFirstPixelPtr() + 1;
        for (int row = 0; row < scoreH; row++)
        {
            for (int col = 0; col < scoreW; col++)
            {
                if (row < mapH[i] && col < mapW[i])
                    maps[i][row*mapW[i] + col] = *p;
                p += scorePixStep;
            }
        }
    }
}

// Multi-threaded PNet: resize all scales (possibly in parallel), then split
// each scaled image into overlapping tiles and forward the tiles as
// independent tasks, writing scores back into the per-scale maps.
void _compute_Pnet_multi_thread(ZQ_CNN_Tensor4D_Interface& input, std::vector<std::vector<float> >& maps,
    std::vector<int>& mapH, std::vector<int>& mapW)
{
    if (thread_num <= 1)
    {
        for (int i = 0; i < scales.size(); i++)
        {
            int changedH = (int)ceil(height*scales[i]);
            int changedW = (int)ceil(width*scales[i]);
            if (changedH < pnet_size || changedW < pnet_size)
                continue;
            if (scales[i] != 1)
            {
                input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
            }
        }
    }
    else
    {
        #pragma omp parallel for num_threads(thread_num) schedule(dynamic, 1)
        for (int i = 0; i < scales.size(); i++)
        {
            int changedH = (int)ceil(height*scales[i]);
            int changedW = (int)ceil(width*scales[i]);
            if (changedH < pnet_size || changedW < pnet_size)
                continue;
            if (scales[i] != 1)
            {
                input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
            }
        }
    }
    int scale_num = 0;
    for (int i = 0; i < scales.size(); i++)
    {
        int changedH = (int)ceil(height*scales[i]);
        int changedW = (int)ceil(width*scales[i]);
        if (changedH < pnet_size || changedW < pnet_size)
            continue;
        scale_num++;
        mapH.push_back((changedH - pnet_size) / pnet_stride + 1);
        mapW.push_back((changedW - pnet_size) / pnet_stride + 1);
    }
    maps.resize(scale_num);
    for (int i = 0; i < scale_num; i++)
    {
        maps[i].resize(mapH[i] * mapW[i]);
    }
    // Tile descriptions (one task per tile, across all scales).
    std::vector<int> task_rect_off_x;
    std::vector<int> task_rect_off_y;
    std::vector<int> task_rect_width;
    std::vector<int> task_rect_height;
    std::vector<float> task_scale;
    std::vector<int> task_scale_id;
    int stride =
    pnet_stride;
const int block_size = 64 * stride;
int cellsize = pnet_size;
// Tiles overlap by (cellsize - stride) pixels so that windows straddling a
// tile seam are still evaluated.
int border_size = cellsize - stride;
int overlap_border_size = cellsize / stride; // NOTE(review): currently unused
int jump_size = block_size - border_size;
for (int i = 0; i < scales.size(); i++)
{
    int changeH = (int)ceil(height*scales[i]);
    int changeW = (int)ceil(width*scales[i]);
    if (changeH < pnet_size || changeW < pnet_size)
        continue;
    // Count how many tiles cover each dimension.
    int block_H_num = 0;
    int block_W_num = 0;
    int start = 0;
    while (start < changeH)
    {
        block_H_num++;
        if (start + block_size >= changeH)
            break;
        start += jump_size;
    }
    start = 0;
    while (start < changeW)
    {
        block_W_num++;
        if (start + block_size >= changeW)
            break;
        start += jump_size;
    }
    for (int s = 0; s < block_H_num; s++)
    {
        for (int t = 0; t < block_W_num; t++)
        {
            int rect_off_x = t * jump_size;
            int rect_off_y = s * jump_size;
            int rect_width = __min(changeW, rect_off_x + block_size) - rect_off_x;
            int rect_height = __min(changeH, rect_off_y + block_size) - rect_off_y;
            // Only keep tiles large enough to hold at least one PNet window.
            if (rect_width >= cellsize && rect_height >= cellsize)
            {
                task_rect_off_x.push_back(rect_off_x);
                task_rect_off_y.push_back(rect_off_y);
                task_rect_width.push_back(rect_width);
                task_rect_height.push_back(rect_height);
                task_scale.push_back(scales[i]);
                task_scale_id.push_back(i);
            }
        }
    }
}
// Forward each tile through PNet and scatter the scores into the maps.
int task_num = task_scale.size();
std::vector<ZQ_CNN_Tensor4D_Interface> task_pnet_images(thread_num);
if (thread_num <= 1)
{
    for (int i = 0; i < task_num; i++)
    {
        int thread_id = omp_get_thread_num();
        int scale_id = task_scale_id[i];
        float cur_scale = task_scale[i];
        int i_rect_off_x = task_rect_off_x[i];
        int i_rect_off_y = task_rect_off_y[i];
        int i_rect_width = task_rect_width[i];
        int i_rect_height = task_rect_height[i];
        // Scale 1 reads from the original input; others from the resized image.
        if (scale_id == 0 && scales[0] == 1)
        {
            if (!input.ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
                continue;
        }
        else
        {
            if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
                continue;
        }
        if
            (!pnet[thread_id].Forward(task_pnet_images[thread_id]))
            continue;
        const ZQ_CNN_Tensor4D_Interface_Base* score = pnet[thread_id].GetBlobByName("prob1");
        int task_count = 0;
        // score map: channel 1 (face probability) of each pixel.
        int scoreH = score->GetH();
        int scoreW = score->GetW();
        int scorePixStep = score->GetPixelStep();
        const float *p = score->GetFirstPixelPtr() + 1;
        ZQ_CNN_BBox bbox;
        ZQ_CNN_OrderScore order;
        for (int row = 0; row < scoreH; row++)
        {
            for (int col = 0; col < scoreW; col++)
            {
                // Offset tile-local coordinates back into the full score map.
                int real_row = row + i_rect_off_y / stride;
                int real_col = col + i_rect_off_x / stride;
                if (real_row < mapH[scale_id] && real_col < mapW[scale_id])
                    maps[scale_id][real_row*mapW[scale_id] + real_col] = *p;
                p += scorePixStep;
            }
        }
    }
}
else
{
    #pragma omp parallel for num_threads(thread_num)
    for (int i = 0; i < task_num; i++)
    {
        int thread_id = omp_get_thread_num();
        int scale_id = task_scale_id[i];
        float cur_scale = task_scale[i];
        int i_rect_off_x = task_rect_off_x[i];
        int i_rect_off_y = task_rect_off_y[i];
        int i_rect_width = task_rect_width[i];
        int i_rect_height = task_rect_height[i];
        if (scale_id == 0 && scales[0] == 1)
        {
            if (!input.ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
                continue;
        }
        else
        {
            if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
                continue;
        }
        if (!pnet[thread_id].Forward(task_pnet_images[thread_id]))
            continue;
        const ZQ_CNN_Tensor4D_Interface_Base* score = pnet[thread_id].GetBlobByName("prob1");
        int task_count = 0;
        // score map: channel 1 (face probability) of each pixel.
        int scoreH = score->GetH();
        int scoreW = score->GetW();
        int scorePixStep = score->GetPixelStep();
        const float *p = score->GetFirstPixelPtr() + 1;
        ZQ_CNN_BBox bbox;
        ZQ_CNN_OrderScore order;
        for (int row = 0; row < scoreH; row++)
        {
            for (int col = 0; col < scoreW; col++)
            {
                int real_row = row + i_rect_off_y / stride;
                int real_col = col + i_rect_off_x / stride;
                if (real_row < mapH[scale_id] && real_col < mapW[scale_id])
                    maps[scale_id][real_row*mapW[scale_id] + real_col] =
                        *p;
                p += scorePixStep;
            }
        }
    }
}
}

// Stage 1: compute PNet score maps over the pyramid, threshold them into
// candidate boxes, NMS per scale (blockwise for large maps), then a global
// NMS and square-refinement across all scales.
bool _Pnet_stage(ZQ_CNN_Tensor4D_Interface& input, std::vector<ZQ_CNN_BBox>& firstBbox)
{
    if (thread_num <= 0)
        return false;
    double t1 = omp_get_wtime();
    firstBbox.clear();
    std::vector<std::vector<float> > maps;
    std::vector<int> mapH;
    std::vector<int> mapW;
    if (thread_num == 1 && !force_run_pnet_multithread)
    {
        pnet[0].TurnOffShowDebugInfo();
        //pnet[0].TurnOnShowDebugInfo();
        _compute_Pnet_single_thread(input, maps, mapH, mapW);
    }
    else
    {
        _compute_Pnet_multi_thread(input, maps, mapH, mapW);
    }
    ZQ_CNN_OrderScore order;
    std::vector<std::vector<ZQ_CNN_BBox> > bounding_boxes(scales.size());
    std::vector<std::vector<ZQ_CNN_OrderScore> > bounding_scores(scales.size());
    const int block_size = 32;
    int stride = pnet_stride;
    int cellsize = pnet_size;
    // Overlap margin expressed in score-map cells (windows per window size).
    int border_size = cellsize / stride;
    for (int i = 0; i < maps.size(); i++)
    {
        double t13 = omp_get_wtime();
        int changedH = (int)ceil(height*scales[i]);
        int changedW = (int)ceil(width*scales[i]);
        if (changedH < pnet_size || changedW < pnet_size)
            continue;
        float cur_scale_x = (float)width / changedW;
        float cur_scale_y = (float)height / changedH;
        int count = 0;
        // score map for this scale
        int scoreH = mapH[i];
        int scoreW = mapW[i];
        const float *p = &maps[i][0];
        // NOTE(review): mixed bounds (<= for W, < for H) — presumably both were
        // meant to be the same comparison; verify intent before changing.
        if (scoreW <= block_size && scoreH < block_size)
        {
            // Small map: threshold the whole map and NMS it in one go.
            ZQ_CNN_BBox bbox;
            ZQ_CNN_OrderScore order;
            for (int row = 0; row < scoreH; row++)
            {
                for (int col = 0; col < scoreW; col++)
                {
                    if (*p > thresh[0])
                    {
                        bbox.score = *p;
                        order.score = *p;
                        order.oriOrder = count;
                        // Box in scaled-image pixel coordinates.
                        bbox.row1 = stride*row;
                        bbox.col1 = stride*col;
                        bbox.row2 = stride*row + cellsize;
                        bbox.col2 = stride*col + cellsize;
                        bbox.exist = true;
                        bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
                        bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size)
                            && (col >= border_size && col < scoreW - border_size);
                        bounding_boxes[i].push_back(bbox);
                        bounding_scores[i].push_back(order);
                        count++;
                    }
                    p++;
                }
            }
            int before_count = bounding_boxes[i].size();
            ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i],
bounding_scores[i], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int after_count = bounding_boxes[i].size(); for (int j = 0; j < after_count; j++) { ZQ_CNN_BBox& bbox = bounding_boxes[i][j]; bbox.row1 = round(bbox.row1 *cur_scale_y); bbox.col1 = round(bbox.col1 *cur_scale_x); bbox.row2 = round(bbox.row2 *cur_scale_y); bbox.col2 = round(bbox.col2 *cur_scale_x); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); } double t14 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count); } else { int before_count = 0, after_count = 0; int block_H_num = __max(1, scoreH / block_size); int block_W_num = __max(1, scoreW / block_size); int block_num = block_H_num*block_W_num; int width_per_block = scoreW / block_W_num; int height_per_block = scoreH / block_H_num; std::vector<std::vector<ZQ_CNN_BBox> > tmp_bounding_boxes(block_num); std::vector<std::vector<ZQ_CNN_OrderScore> > tmp_bounding_scores(block_num); std::vector<int> block_start_w(block_num), block_end_w(block_num); std::vector<int> block_start_h(block_num), block_end_h(block_num); for (int bh = 0; bh < block_H_num; bh++) { for (int bw = 0; bw < block_W_num; bw++) { int bb = bh * block_W_num + bw; block_start_w[bb] = (bw == 0) ? 0 : (bw*width_per_block - border_size); block_end_w[bb] = (bw == block_num - 1) ? scoreW : ((bw + 1)*width_per_block); block_start_h[bb] = (bh == 0) ? 0 : (bh*height_per_block - border_size); block_end_h[bb] = (bh == block_num - 1) ? 
                        scoreH : ((bh + 1)*height_per_block);
                }
            }
            int chunk_size = 1;// ceil((float)block_num / thread_num);
            if (thread_num <= 1)
            {
                for (int bb = 0; bb < block_num; bb++)
                {
                    ZQ_CNN_BBox bbox;
                    ZQ_CNN_OrderScore order;
                    int count = 0;
                    for (int row = block_start_h[bb]; row < block_end_h[bb]; row++)
                    {
                        p = &maps[i][0] + row*scoreW + block_start_w[bb];
                        for (int col = block_start_w[bb]; col < block_end_w[bb]; col++)
                        {
                            if (*p > thresh[0])
                            {
                                bbox.score = *p;
                                order.score = *p;
                                order.oriOrder = count;
                                // Box in scaled-image pixel coordinates.
                                bbox.row1 = stride*row;
                                bbox.col1 = stride*col;
                                bbox.row2 = stride*row + cellsize;
                                bbox.col2 = stride*col + cellsize;
                                bbox.exist = true;
                                bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size)
                                    && (col >= border_size && col < scoreW - border_size);
                                bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
                                tmp_bounding_boxes[bb].push_back(bbox);
                                tmp_bounding_scores[bb].push_back(order);
                                count++;
                            }
                            p++;
                        }
                    }
                    int tmp_before_count = tmp_bounding_boxes[bb].size();
                    ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb],
                        nms_thresh_per_scale, "Union", pnet_overlap_thresh_count);
                    int tmp_after_count = tmp_bounding_boxes[bb].size();
                    before_count += tmp_before_count;
                    after_count += tmp_after_count;
                }
            }
            else
            {
                // NOTE(review): before_count/after_count are updated from
                // multiple threads without synchronization (data race); they are
                // only used for the debug printf below and after_count is
                // recomputed afterwards, but consider a reduction clause.
                #pragma omp parallel for schedule(dynamic, chunk_size) num_threads(thread_num)
                for (int bb = 0; bb < block_num; bb++)
                {
                    ZQ_CNN_BBox bbox;
                    ZQ_CNN_OrderScore order;
                    int count = 0;
                    for (int row = block_start_h[bb]; row < block_end_h[bb]; row++)
                    {
                        const float* p = &maps[i][0] + row*scoreW + block_start_w[bb];
                        for (int col = block_start_w[bb]; col < block_end_w[bb]; col++)
                        {
                            if (*p > thresh[0])
                            {
                                bbox.score = *p;
                                order.score = *p;
                                order.oriOrder = count;
                                bbox.row1 = stride*row;
                                bbox.col1 = stride*col;
                                bbox.row2 = stride*row + cellsize;
                                bbox.col2 = stride*col + cellsize;
                                bbox.exist = true;
                                bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size)
                                    && (col >= border_size && col < scoreW - border_size);
                                bbox.area = (bbox.row2 -
                                    bbox.row1)*(bbox.col2 - bbox.col1);
                                tmp_bounding_boxes[bb].push_back(bbox);
                                tmp_bounding_scores[bb].push_back(order);
                                count++;
                            }
                            p++;
                        }
                    }
                    int tmp_before_count = tmp_bounding_boxes[bb].size();
                    ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb],
                        nms_thresh_per_scale, "Union", pnet_overlap_thresh_count);
                    int tmp_after_count = tmp_bounding_boxes[bb].size();
                    before_count += tmp_before_count;
                    after_count += tmp_after_count;
                }
            }
            // Merge the per-tile survivors into this scale's candidate list.
            count = 0;
            for (int bb = 0; bb < block_num; bb++)
            {
                std::vector<ZQ_CNN_BBox>::iterator it = tmp_bounding_boxes[bb].begin();
                for (; it != tmp_bounding_boxes[bb].end(); it++)
                {
                    if ((*it).exist)
                    {
                        bounding_boxes[i].push_back(*it);
                        order.score = (*it).score;
                        order.oriOrder = count;
                        bounding_scores[i].push_back(order);
                        count++;
                    }
                }
            }
            //ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", 0);
            after_count = bounding_boxes[i].size();
            // Map the surviving boxes back to original-image coordinates.
            for (int j = 0; j < after_count; j++)
            {
                ZQ_CNN_BBox& bbox = bounding_boxes[i][j];
                bbox.row1 = round(bbox.row1 *cur_scale_y);
                bbox.col1 = round(bbox.col1 *cur_scale_x);
                bbox.row2 = round(bbox.row2 *cur_scale_y);
                bbox.col2 = round(bbox.col2 *cur_scale_x);
                bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
            }
            double t14 = omp_get_wtime();
            if (show_debug_info)
                printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count);
        }
    }
    // Gather all scales' surviving boxes for the cross-scale NMS.
    std::vector<ZQ_CNN_OrderScore> firstOrderScore;
    int count = 0;
    for (int i = 0; i < scales.size(); i++)
    {
        std::vector<ZQ_CNN_BBox>::iterator it = bounding_boxes[i].begin();
        for (; it != bounding_boxes[i].end(); it++)
        {
            if ((*it).exist)
            {
                firstBbox.push_back(*it);
                order.score = (*it).score;
                order.oriOrder = count;
                firstOrderScore.push_back(order);
                count++;
            }
        }
    }
    //the first stage's nms
    if (count < 1) return false;
    double t15 = omp_get_wtime();
    ZQ_CNN_BBoxUtils::_nms(firstBbox, firstOrderScore, nms_thresh[0], "Union", 0, 1);
    ZQ_CNN_BBoxUtils::_refine_and_square_bbox(firstBbox, width, height, true);
    double t16 =
        omp_get_wtime();
    if (show_debug_info)
        printf("nms cost: %.3f ms\n", 1000 * (t16 - t15));
    if (show_debug_info)
        printf("first stage candidate count: %d\n", count);
    double t3 = omp_get_wtime();
    if (show_debug_info)
        printf("stage 1: cost %.3f ms\n", 1000 * (t3 - t1));
    return true;
}

// Stage 2: crop each surviving PNet box, resize to rnet_size, score it with
// RNet, keep boxes above thresh[1], then NMS ("Min") + square-refinement.
// Work is split into batches of at most BATCH_SIZE boxes per thread.
bool _Rnet_stage(const ZQ_CNN_Tensor4D_Interface& input, std::vector<ZQ_CNN_BBox>& firstBbox,
    std::vector<ZQ_CNN_BBox>& secondBbox)
{
    double t3 = omp_get_wtime();
    secondBbox.clear();
    std::vector<ZQ_CNN_BBox>::iterator it = firstBbox.begin();
    std::vector<ZQ_CNN_OrderScore> secondScore;
    std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
    int r_count = 0;
    for (; it != firstBbox.end(); it++)
    {
        if ((*it).exist)
        {
            int off_x = it->col1;
            int off_y = it->row1;
            int rect_w = it->col2 - off_x;
            int rect_h = it->row2 - off_y;
            // Discard boxes much smaller than the configured minimum face size.
            if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
            {
                (*it).exist = false;
                continue;
            }
            else
            {
                src_off_x.push_back(off_x);
                src_off_y.push_back(off_y);
                src_rect_w.push_back(rect_w);
                src_rect_h.push_back(rect_h);
                r_count++;
                secondBbox.push_back(*it);
            }
        }
    }
    // Split the r_count crops into need_thread_num batches of per_num each.
    int batch_size = BATCH_SIZE;
    int per_num = ceil((float)r_count / thread_num);
    int need_thread_num = thread_num;
    if (per_num > batch_size)
    {
        need_thread_num = ceil((float)r_count / batch_size);
        per_num = batch_size;
    }
    std::vector<ZQ_CNN_Tensor4D_Interface> task_rnet_images(need_thread_num);
    std::vector<std::vector<int> > task_src_off_x(need_thread_num);
    std::vector<std::vector<int> > task_src_off_y(need_thread_num);
    std::vector<std::vector<int> > task_src_rect_w(need_thread_num);
    std::vector<std::vector<int> > task_src_rect_h(need_thread_num);
    std::vector<std::vector<ZQ_CNN_BBox> > task_secondBbox(need_thread_num);
    for (int i = 0; i < need_thread_num; i++)
    {
        int st_id = per_num*i;
        int end_id = __min(r_count, per_num*(i + 1));
        int cur_num = end_id - st_id;
        if (cur_num > 0)
        {
            task_src_off_x[i].resize(cur_num);
task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_secondBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_secondBbox[i][j] = secondBbox[st_id + j]; } } } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } rnet[0].Forward(task_rnet_images[pp]); const ZQ_CNN_Tensor4D_Interface_Base* score = rnet[0].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D_Interface_Base* location = rnet[0].GetBlobByName("conv5-2"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int task_count = 0; for (int i = 0; i < task_secondBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[1]) { for (int j = 0; j < 4; j++) task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_secondBbox[pp][i].exist = false; } } if (task_count < 1) { task_secondBbox[pp].clear(); continue; } for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--) { if (!task_secondBbox[pp][i].exist) task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i); } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic,1) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if 
                (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0,
                task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
            {
                continue;
            }
            rnet[thread_id].Forward(task_rnet_images[pp]);
            const ZQ_CNN_Tensor4D_Interface_Base* score = rnet[thread_id].GetBlobByName("prob1");
            const ZQ_CNN_Tensor4D_Interface_Base* location = rnet[thread_id].GetBlobByName("conv5-2");
            const float* score_ptr = score->GetFirstPixelPtr();
            const float* location_ptr = location->GetFirstPixelPtr();
            int score_sliceStep = score->GetSliceStep();
            int location_sliceStep = location->GetSliceStep();
            int task_count = 0;
            for (int i = 0; i < task_secondBbox[pp].size(); i++)
            {
                if (score_ptr[i*score_sliceStep + 1] > thresh[1])
                {
                    // Keep: store the regression offsets and updated score.
                    for (int j = 0; j < 4; j++)
                        task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
                    task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
                    task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
                    task_count++;
                }
                else
                {
                    task_secondBbox[pp][i].exist = false;
                }
            }
            if (task_count < 1)
            {
                task_secondBbox[pp].clear();
                continue;
            }
            // Compact: erase rejected boxes (reverse order keeps indices valid).
            for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--)
            {
                if (!task_secondBbox[pp][i].exist)
                    task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i);
            }
        }
    }
    // Merge all batches back into secondBbox, then NMS + square refinement.
    int count = 0;
    for (int i = 0; i < need_thread_num; i++)
    {
        count += task_secondBbox[i].size();
    }
    secondBbox.resize(count);
    secondScore.resize(count);
    int id = 0;
    for (int i = 0; i < need_thread_num; i++)
    {
        for (int j = 0; j < task_secondBbox[i].size(); j++)
        {
            secondBbox[id] = task_secondBbox[i][j];
            secondScore[id].score = secondBbox[id].score;
            secondScore[id].oriOrder = id;
            id++;
        }
    }
    //ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Union");
    ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Min");
    ZQ_CNN_BBoxUtils::_refine_and_square_bbox(secondBbox, width, height, true);
    count = secondBbox.size();
    double t4 = omp_get_wtime();
    if (show_debug_info)
        printf("run Rnet [%d] times, candidate after nms: %d \n", r_count, count);
    if (show_debug_info)
        printf("stage 2: cost %.3f ms\n", 1000 * (t4 - t3));
    return true;
}

// Stage 3: crop each surviving RNet box, resize to onet_size and score it with
// ONet; keeps regression offsets and (when the "conv6-3" blob exists) the
// 5-point landmarks. When landmarks are disabled, boxes scoring above
// early_accept_thresh bypass ONet entirely. Batched like _Rnet_stage.
bool _Onet_stage(const ZQ_CNN_Tensor4D_Interface& input, std::vector<ZQ_CNN_BBox>& secondBbox,
    std::vector<ZQ_CNN_BBox>& thirdBbox)
{
    double t4 = omp_get_wtime();
    thirdBbox.clear();
    std::vector<ZQ_CNN_BBox>::iterator it = secondBbox.begin();
    std::vector<ZQ_CNN_OrderScore> thirdScore;
    std::vector<ZQ_CNN_BBox> early_accept_thirdBbox;
    std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
    int o_count = 0;
    for (; it != secondBbox.end(); it++)
    {
        if ((*it).exist)
        {
            int off_x = it->col1;
            int off_y = it->row1;
            int rect_w = it->col2 - off_x;
            int rect_h = it->row2 - off_y;
            // Discard boxes much smaller than the configured minimum face size.
            if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
            {
                (*it).exist = false;
                continue;
            }
            else
            {
                if (!do_landmark && it->score > early_accept_thresh)
                {
                    // High-confidence box and no landmarks needed: accept as-is.
                    early_accept_thirdBbox.push_back(*it);
                }
                else
                {
                    src_off_x.push_back(off_x);
                    src_off_y.push_back(off_y);
                    src_rect_w.push_back(rect_w);
                    src_rect_h.push_back(rect_h);
                    o_count++;
                    thirdBbox.push_back(*it);
                }
            }
        }
    }
    // Split the o_count crops into need_thread_num batches of per_num each.
    int batch_size = BATCH_SIZE;
    int per_num = ceil((float)o_count / thread_num);
    int need_thread_num = thread_num;
    if (per_num > batch_size)
    {
        need_thread_num = ceil((float)o_count / batch_size);
        per_num = batch_size;
    }
    std::vector<ZQ_CNN_Tensor4D_Interface> task_onet_images(need_thread_num);
    std::vector<std::vector<int> > task_src_off_x(need_thread_num);
    std::vector<std::vector<int> > task_src_off_y(need_thread_num);
    std::vector<std::vector<int> > task_src_rect_w(need_thread_num);
    std::vector<std::vector<int> > task_src_rect_h(need_thread_num);
    std::vector<std::vector<ZQ_CNN_BBox> > task_thirdBbox(need_thread_num);
    for (int i = 0; i < need_thread_num; i++)
    {
        int st_id = per_num*i;
        int end_id = __min(o_count, per_num*(i + 1));
        int cur_num = end_id - st_id;
        if (cur_num > 0)
        {
            task_src_off_x[i].resize(cur_num);
            task_src_off_y[i].resize(cur_num);
            task_src_rect_w[i].resize(cur_num);
            task_src_rect_h[i].resize(cur_num);
            task_thirdBbox[i].resize(cur_num);
            for (int j = 0; j < cur_num; j++)
            {
                task_src_off_x[i][j] = src_off_x[st_id + j];
                task_src_off_y[i][j] = src_off_y[st_id + j];
                task_src_rect_w[i][j] = src_rect_w[st_id + j];
                task_src_rect_h[i][j] = src_rect_h[st_id + j];
                task_thirdBbox[i][j] = thirdBbox[st_id + j];
            }
        }
    }
    if (thread_num <= 1)
    {
        for (int pp = 0; pp < need_thread_num; pp++)
        {
            // Skip empty batches (tasks beyond o_count have no crops).
            if (task_src_off_x.size() == 0 || task_src_off_x[pp].size() == 0)
                continue;
            if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0,
                task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
            {
                continue;
            }
            double t31 = omp_get_wtime();
            onet[0].Forward(task_onet_images[pp]);
            double t32 = omp_get_wtime();
            const ZQ_CNN_Tensor4D_Interface_Base* score = onet[0].GetBlobByName("prob1");
            const ZQ_CNN_Tensor4D_Interface_Base* location = onet[0].GetBlobByName("conv6-2");
            // "conv6-3" (5-point landmarks) may be absent from the model.
            const ZQ_CNN_Tensor4D_Interface_Base* keyPoint = onet[0].GetBlobByName("conv6-3");
            const float* score_ptr = score->GetFirstPixelPtr();
            const float* location_ptr = location->GetFirstPixelPtr();
            const float* keyPoint_ptr = 0;
            if (keyPoint != 0)
                keyPoint_ptr = keyPoint->GetFirstPixelPtr();
            int score_sliceStep = score->GetSliceStep();
            int location_sliceStep = location->GetSliceStep();
            int keyPoint_sliceStep = 0;
            if (keyPoint != 0)
                keyPoint_sliceStep = keyPoint->GetSliceStep();
            int task_count = 0;
            ZQ_CNN_OrderScore order;
            for (int i = 0; i < task_thirdBbox[pp].size(); i++)
            {
                if (score_ptr[i*score_sliceStep + 1] > thresh[2])
                {
                    for (int j = 0; j < 4; j++)
                        task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
                    if (keyPoint != 0)
                    {
                        // Landmarks are predicted relative to the box: x then y.
                        for (int num = 0; num < 5; num++)
                        {
                            task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 +
                                (task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num];
                            task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 +
                                (task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5];
                        }
                    }
                    task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
                    task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
                    task_count++;
                }
                else
                {
                    task_thirdBbox[pp][i].exist = false;
                }
            }
            if (task_count < 1)
            {
                task_thirdBbox[pp].clear();
                continue;
            }
            // Compact: erase rejected boxes (reverse order keeps indices valid).
            for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--)
            {
                if (!task_thirdBbox[pp][i].exist)
                    task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i);
            }
        }
    }
    else
    {
        #pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
        for (int pp = 0; pp < need_thread_num; pp++)
        {
            int thread_id = omp_get_thread_num();
            if (task_src_off_x.size() == 0 || task_src_off_x[pp].size() == 0)
                continue;
            if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0,
                task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
            {
                continue;
            }
            double t31 = omp_get_wtime();
            onet[thread_id].Forward(task_onet_images[pp]);
            double t32 = omp_get_wtime();
            const ZQ_CNN_Tensor4D_Interface_Base* score = onet[thread_id].GetBlobByName("prob1");
            const ZQ_CNN_Tensor4D_Interface_Base* location = onet[thread_id].GetBlobByName("conv6-2");
            const ZQ_CNN_Tensor4D_Interface_Base* keyPoint = onet[thread_id].GetBlobByName("conv6-3");
            const float* score_ptr = score->GetFirstPixelPtr();
            const float* location_ptr = location->GetFirstPixelPtr();
            const float* keyPoint_ptr = 0;
            if (keyPoint != 0)
                keyPoint_ptr = keyPoint->GetFirstPixelPtr();
            int score_sliceStep = score->GetSliceStep();
            int location_sliceStep = location->GetSliceStep();
            int keyPoint_sliceStep = 0;
            if (keyPoint != 0)
                keyPoint_sliceStep = keyPoint->GetSliceStep();
            int task_count = 0;
            ZQ_CNN_OrderScore order;
            for (int i = 0; i < task_thirdBbox[pp].size(); i++)
            {
                if (score_ptr[i*score_sliceStep + 1] > thresh[2])
                {
                    for (int j = 0; j < 4; j++)
                        task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
                    if (keyPoint != 0)
                    {
                        for
(int num = 0; num < 5; num++) { task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 + (task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 + (task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_thirdBbox[pp][i].exist = false; } } if (task_count < 1) { task_thirdBbox[pp].clear(); continue; } for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--) { if (!task_thirdBbox[pp][i].exist) task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i); } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_thirdBbox[i].size(); } thirdBbox.resize(count); thirdScore.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_thirdBbox[i].size(); j++) { thirdBbox[id] = task_thirdBbox[i][j]; thirdScore[id].score = task_thirdBbox[i][j].score; thirdScore[id].oriOrder = id; id++; } } ZQ_CNN_BBoxUtils::_refine_and_square_bbox(thirdBbox, width, height, false); ZQ_CNN_OrderScore order; for (int i = 0; i < early_accept_thirdBbox.size(); i++) { order.score = early_accept_thirdBbox[i].score; order.oriOrder = count++; thirdScore.push_back(order); thirdBbox.push_back(early_accept_thirdBbox[i]); } ZQ_CNN_BBoxUtils::_nms(thirdBbox, thirdScore, nms_thresh[2], "Min"); double t5 = omp_get_wtime(); if (show_debug_info) printf("run Onet [%d] times, candidate before nms: %d \n", o_count, count); if (show_debug_info) printf("stage 3: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } bool _Lnet_stage(const ZQ_CNN_Tensor4D_Interface& input, std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox>& fourthBbox) { double t4 = omp_get_wtime(); fourthBbox.clear(); std::vector<ZQ_CNN_BBox>::iterator it 
= thirdBbox.begin(); std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int l_count = 0; for (; it != thirdBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { l_count++; fourthBbox.push_back(*it); } } } std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox; ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height); for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); } int batch_size = BATCH_SIZE; int per_num = ceil((float)l_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)l_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_Interface> task_lnet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox> > task_fourthBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(l_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_fourthBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; 
task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_fourthBbox[i][j] = copy_fourthBbox[st_id + j]; } } } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[0].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D_Interface_Base* keyPoint = lnet[0].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < 5; num++) { task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic,1) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[thread_id].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D_Interface_Base* keyPoint = lnet[thread_id].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < 5; num++) { task_fourthBbox[pp][i].ppoint[num] = 
task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_fourthBbox[i].size(); } fourthBbox.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_fourthBbox[i].size(); j++) { memcpy(fourthBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 10); id++; } } double t5 = omp_get_wtime(); if (show_debug_info) printf("run Lnet [%d] times \n", l_count); if (show_debug_info) printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } bool _Lnet106_stage(const ZQ_CNN_Tensor4D_Interface& input, std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox106>& resultBbox) { double t4 = omp_get_wtime(); std::vector<ZQ_CNN_BBox> fourthBbox; std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin(); std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int l_count = 0; for (; it != thirdBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { l_count++; fourthBbox.push_back(*it); } } } std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox; ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height); for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); } int batch_size = BATCH_SIZE; 
int per_num = ceil((float)l_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)l_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_lnet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox106> > task_fourthBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(l_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_fourthBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_fourthBbox[i][j].col1 = copy_fourthBbox[st_id + j].col1; task_fourthBbox[i][j].col2 = copy_fourthBbox[st_id + j].col2; task_fourthBbox[i][j].row1 = copy_fourthBbox[st_id + j].row1; task_fourthBbox[i][j].row2 = copy_fourthBbox[st_id + j].row2; task_fourthBbox[i][j].area = copy_fourthBbox[st_id + j].area; task_fourthBbox[i][j].score = copy_fourthBbox[st_id + j].score; task_fourthBbox[i][j].exist = copy_fourthBbox[st_id + j].exist; } } } resultBbox.resize(l_count); for (int i = 0; i < l_count; i++) { resultBbox[i].col1 = fourthBbox[i].col1; resultBbox[i].col2 = fourthBbox[i].col2; resultBbox[i].row1 = fourthBbox[i].row1; resultBbox[i].row2 = fourthBbox[i].row2; resultBbox[i].score = fourthBbox[i].score; resultBbox[i].exist = fourthBbox[i].exist; resultBbox[i].area = fourthBbox[i].area; } if (thread_num <= 1) { for (int pp 
= 0; pp < need_thread_num; pp++) { if (task_src_off_x[pp].size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[0].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D_Interface_Base* keyPoint = lnet[0].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keypoint_num = keyPoint->GetC() / 2; int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < keypoint_num; num++) { if ((num >= 33 && num < 43) || (num >= 64 && num < 72) || (num >= 84 && num < 104)) { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]/**0.25*/; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]/**0.25*/; } else { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]; } } } } } else { #pragma omp parallel for num_threads(thread_num) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[thread_id].Forward(task_lnet_images[pp]); double t32 = 
omp_get_wtime(); const ZQ_CNN_Tensor4D_Interface_Base* keyPoint = lnet[thread_id].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keypoint_num = keyPoint->GetC() / 2; int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < keypoint_num; num++) { if ((num >= 33 && num < 43) || (num >= 64 && num < 72) || (num >= 84 && num < 104)) { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2] * 0.5; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1] * 0.5; } else { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]; } } } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_fourthBbox[i].size(); } resultBbox.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_fourthBbox[i].size(); j++) { memcpy(resultBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 212); id++; } } double t5 = omp_get_wtime(); if (show_debug_info) printf("run Lnet [%d] times \n", l_count); if (show_debug_info) printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } void _select(std::vector<ZQ_CNN_BBox>& bbox, int limit_num, int width, int height) { int in_num = bbox.size(); if (limit_num >= in_num) return; bbox.resize(limit_num); } }; } #endif
testrun_tools.c
/*** ------------------------------------------------------------------------ Copyright 2018 Markus Toepfer Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ------------------------------------------------------------------------ *//** @file testrun_tools.c @author Markus Toepfer @date 2018-07-10 @ingroup testrun_lib @brief Standard implementation of all required testrun tools. ------------------------------------------------------------------------ */ #include "../include/testrun_tools.h" /* * ------------------------------------------------------------------------ * * SHELL header * * this constant string will be used to generate the default SHELL header * of the testrun scripts. 
* * ------------------------------------------------------------------------ */
/*
 * NOTE: each constant below embeds a complete generated file as ONE string
 * (adjacent C string literals are concatenated at compile time); every byte,
 * including alignment spaces and the trailing "\n" of each fragment, is
 * emitted verbatim by the generator -- do not reflow or re-indent.
 */
static const char *bash_header = "#!/usr/bin/env bash\n" "#\n" "# Copyright 2017 Markus Toepfer\n" "#\n" "# Licensed under the Apache License, Version 2.0 (the \"License\");\n" "# you may not use this file except in compliance with the License.\n" "# You may obtain a copy of the License at\n" "#\n" "# http://www.apache.org/licenses/LICENSE-2.0\n" "#\n" "# Unless required by applicable law or agreed to in writing, software\n" "# distributed under the License is distributed on an \"AS IS\" BASIS,\n" "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" "# See the License for the specific language governing permissions and\n" "# limitations under the License.\n" "#\n" "# ------------------------------------------------------------------------\n";
/* * ------------------------------------------------------------------------ * * C header file #TESTRUN_HEADER * * this constant string will be used to generate * testrun.h * * ------------------------------------------------------------------------ */
static const char *testrun_header = "/***\n" " ------------------------------------------------------------------------\n" "\n" " Copyright 2017 Markus Toepfer\n" "\n" " Licensed under the Apache License, Version 2.0 (the \"License\");\n" " you may not use this file except in compliance with the License.\n" " You may obtain a copy of the License at\n" "\n" " http://www.apache.org/licenses/LICENSE-2.0\n" "\n" " Unless required by applicable law or agreed to in writing, software\n" " distributed under the License is distributed on an \"AS IS\" BASIS,\n" " WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" " See the License for the specific language governing permissions and\n" " limitations under the License.\n" "\n" " This file is part of the testrun project. 
http://testrun.info\n" "\n" " ------------------------------------------------------------------------\n" "*//**\n" "\n" " @file testrun.h\n" " @author Markus Toepfer\n" " @date 2017-11-24\n" "\n" " @brief Simple serial test execution framework.\n" "\n" " NOTE This framework uses an alternative to assert based\n" " testing, which is compatible with parallel\n" " test execution of @see testrun2.h. So this header is\n" " replacable with testrun2.h for parallel test setup,\n" " without replacing any written testcase.\n" "\n" " (Assert.h is not included, to force to write testrun2.h\n" " compatible tests by default)\n" "\n" " ------------------------------------------------------------------------\n" "*/\n" "\n" "#ifndef testrun_h\n" "#define testrun_h\n" "\n" "#include <unistd.h> /* C89/C90 */\n" "#include <stdlib.h> /* C89/C90 */\n" "#include <stdio.h> /* C89/C90 */\n" "#include <string.h> /* C89/C90 */\n" "#include <errno.h> /* C89/C90 */\n" "#include <time.h> /* C89/C90 */\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "/**\n" " String error initialization of no error.\n" "*/\n" "#define testrun_errno() \\\n" " (errno == 0 ? \"NONE\" : strerror(errno))\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "/**\n" " Log a failure. Failure: Inability to perform a function as expected.\n" "*/\n" "#define testrun_log_failure(msg, ...) \\\n" " fprintf(stderr, \"\\t[FAIL]\\t%s line:%d errno:%s message: \" msg \"\\n\",\\\n" " __FUNCTION__, __LINE__, testrun_errno(), ##__VA_ARGS__)\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "/**\n" " Log an error. Error: Difference between expected and actual result.\n" "*/\n" "#define testrun_log_error(msg, ...) 
\\\n" " fprintf(stderr, \"\\t[ERROR]\\t%s line:%d errno:%s message: \" msg \"\\n\",\\\n" " __FUNCTION__, __LINE__, testrun_errno(), ##__VA_ARGS__)\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "#define testrun_log_success(msg, ...) \\\n" " fprintf(stdout, \"\\t[OK] \\t%s \" msg \"\\n\", __FUNCTION__, ##__VA_ARGS__)\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "#define testrun_log(msg, ...) \\\n" " fprintf(stdout, \"\\t\" msg \"\\n\", ##__VA_ARGS__)\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "#define testrun_log_function_info(msg, ...) \\\n" " fprintf(stdout, \"\\t[INFO] \\t%s line:%d message: \" msg \"\\n\", \\\n" " __FUNCTION__, __LINE__, ##__VA_ARGS__)\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "#define testrun_log_clock(start, end) \\\n" " fprintf(stdout, \"\\tClock ticks function: ( %s ) | %f s | %.0f ms \\n\", \\\n" " __func__, \\\n" " ((double)(end - start)) / CLOCKS_PER_SEC, \\\n" " (((double)(end - start)) / CLOCKS_PER_SEC ) * 1000)\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "#define testrun_init() \\\n" " int result = 0; \\\n" " int testrun_counter = 0;\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "/**\n" " Run a single atomar test. Return the surrounding block on error.\n" " This function will leave the context block running on error. The\n" " Mindset is a defused assert. LEAVE THE FUNCTION NOT THE PROGRAM.\n" "\n" " @param test boolean decision input.\n" " @returns the calling function on error with -1\n" "*/\n" "#define testrun_check(test, ... 
)\\\n" " if (!(test)) { testrun_log_error(__VA_ARGS__); return -1;}\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "/**\n" " Alias to @see testrun_check.\n" "*/\n" "#define testrun(test, ...)\\\n" " testrun_check(test, __VA_ARGS__ )\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "/*--------------- For EXAMPLE code check http://testrun.info -----------------*/\n" "/**\n" " Run a single test (execute a function pointer. Runs a test function.\n" " On non negative return value of the function run, a testrun_counter\n" " is increased, on negative result, the negative result will be returned.\n" "\n" " @param test function pointer of the test to run\n" " @NOTE The surrounding block is left on negative result of the\n" " function pointer execution.\n" "*/\n" "#define testrun_test(test)\\\n" " result = test(); testrun_counter++; if (result < 0) return result;\n" "\n" "/**\n" " Runs a function pointer, which SHALL contain the test function pointers\n" " to run. The function pointer is wrapped in a main procedure, which and\n" " allows indepentent testruns of the input testcluster over external\n" " execution.\n" "\n" " A clock will be started, as soon as the main is executed and the the\n" " time is stopped again, at the end of the execution. 
The difference\n" " will be printed and is the runtime of the whole input testcluster.\n" "\n" " A run will fail, as soon as one of the tests in the testcluster fails.\n" " (Fail on first) or will run all functions dependent on the testsetup.\n" "\n" " @param testcluster function pointer to be executed.\n" "*/\n" "#define testrun_run(testcluster) int main(int argc, char *argv[]) {\\\n" " argc = argc;\\\n" " clock_t start1_t, end1_t; \\\n" " start1_t = clock(); \\\n" " testrun_log(\"\\ntestrun\\t%s\", argv[0]);\\\n" " int64_t result = testcluster();\\\n" " if (result > 0) \\\n" " testrun_log(\"ALL TESTS RUN (%jd tests)\", result);\\\n" " end1_t = clock(); \\\n" " testrun_log_clock(start1_t, end1_t); \\\n" " testrun_log(\"\");\\\n" " result >=0 ? exit(EXIT_SUCCESS) : exit(EXIT_FAILURE); \\\n" "}\n" "\n" "/** -----------------------------------------------------------------------\n" "\n" " @example testrun_base_example.c\n" " @author Markus Toepfer\n" " @date 2017-11-24\n" "\n" " @brief Example test file using testrun.h\n" "\n" " This example shows the test style for testing with testrun.h and is\n" " build around the testrun_test() macro, which increases a counter which\n" " MUST be initialized in a testcluster function.\n" "\n" " //---------------------------------------------------------------------\n" "\n" " @code\n" " #include \"../tools/testrun.h\"\n" "\n" " int example_function() {\n" " return 1;\n" " }\n" "\n" " //---------------------------------------------------------------------\n" "\n" " int test_function1() {\n" "\n" " // use of testrun_check() for evaluation\n" " testrun_check(1 == 1);\n" " testrun_check(1 == 1, \"some additional information\");\n" "\n" " // use of testrun() for evaluation\n" " testrun(1 == 1);\n" " testrun(1 == 1, \"some additional information\");\n" "\n" " // use of manual evaluation and logging\n" " if (1 != example_function()){\n" " testrun_log_error(\"some additional information.\");\n" " return -1;\n" " }\n" "\n" " // will not be 
reached in case of error\n" " return testrun_log_success();\n" " }\n" "\n" " //---------------------------------------------------------------------\n" "\n" " int test_function2() {\n" "\n" " testrun_check(1 == 1);\n" " return testrun_log_success();\n" " }\n" "\n" " //---------------------------------------------------------------------\n" "\n" " int test_function3() {\n" "\n" " testrun_check(1 == 1);\n" " return testrun_log_success();\n" " }\n" "\n" " //---------------------------------------------------------------------\n" "\n" " int testcase_cluster() {\n" "\n" " testrun_init();\n" "\n" " testrun_test(test_function1);\n" " testrun_test(test_function2);\n" " testrun_test(test_function3);\n" "\n" " return testrun_counter;\n" "\n" " }\n" "\n" " testrun_run(testcase_cluster);\n" " @endcode\n" "\n" "*/\n" "\n" "#endif /* testrun_h */\n";
/* * ------------------------------------------------------------------------ * * C header file #OPENMP_HEADER * * this constant string will be used to generate * the testrun_openmp.h * * ------------------------------------------------------------------------ */
static const char *testrun_header_openmp = "/***\n" " ------------------------------------------------------------------------\n" "\n" " Copyright 2017 Markus Toepfer\n" "\n" " Licensed under the Apache License, Version 2.0 (the \"License\");\n" " you may not use this file except in compliance with the License.\n" " You may obtain a copy of the License at\n" "\n" " http://www.apache.org/licenses/LICENSE-2.0\n" "\n" " Unless required by applicable law or agreed to in writing, software\n" " distributed under the License is distributed on an \"AS IS\" BASIS,\n" " WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" " See the License for the specific language governing permissions and\n" " limitations under the License.\n" "\n" " This file is part of the testrun project. 
http://testrun.info\n" "\n" " ------------------------------------------------------------------------\n" " *//**\n" "\n" " @file testrun_openmp.h\n" " @author Markus Toepfer\n" " @date 2017-11-17\n" "\n" " @brief Serial and parallel test executing framework with or\n" " without assertion based testing.\n" "\n" " This is an enhanced and compatible version of the initial idea of an\n" " small and simple C89 compatible C unittest header (@see testrun.h)\n" "\n" " For parallel test runs, this framework makes use of OpenMP. Therefore\n" " the code MUST be compiled with -fopenmp, otherwise the code will stay\n" " unparallel and execution sequential.\n" "\n" " @NOTE to use all provided functionality of the header, tests SHOULD be\n" " compiled using:\n" "\n" " -fopenmp (parallel execution) and\n" " -rdynamic (function name backtracing)\n" "\n" " @NOTE Valgrind based file execution in libomp based OpenMP scenarios\n" " may not work, @see docs/valgrind/openMP/README.MD for additional\n" " information.\n" "\n" " ------------------------------------------------------------------------\n" " */\n" "\n" "#ifndef testrun_openmp_h\n" "#define testrun_openmp_h\n" "\n" "#include <omp.h> /* OpenMP parallel (part of GCC, Clang/LLVM) */\n" "\n" "#include <stdbool.h> /* C99 */\n" "#include <stdint.h> /* C99 */\n" "\n" "#include <unistd.h> /* C89/C90 */\n" "#include <stdlib.h> /* C89/C90 */\n" "#include <stdio.h> /* C89/C90 */\n" "#include <string.h> /* C89/C90 */\n" "#include <errno.h> /* C89/C90 */\n" "#include <time.h> /* C89/C90 */\n" "#include <assert.h> /* C89/C90 */\n" "\n" "#if defined(__GLIBC__)\n" "#include <execinfo.h> /* Gnulib backtrace of function pointer names */\n" "#endif\n" "\n" "#define TESTRUN_DEFAULT_CLUSTER_MAX 1000\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "/**\n" " Error initialization of none error.\n" "*/\n" "#define testrun_errno() \\\n" " (errno == 0 ? 
\"NONE\" : strerror(errno))\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "/**\n" " Log a failure. Failure: Inability to perform a function as expected.\n" "*/\n" "#define testrun_log_failure(msg, ...) \\\n" " fprintf(stderr, \"\\t[FAIL]\\t%s line:%d errno:%s message: \" msg \"\\n\",\\\n" " __FUNCTION__, __LINE__, testrun_errno(), ##__VA_ARGS__)\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "/**\n" " Log an error. Error: Difference between expected and actual result.\n" "*/\n" "#define testrun_log_error(msg, ...) \\\n" " fprintf(stderr, \"\\t[ERROR]\\t%s line:%d errno:%s message: \" msg \"\\n\",\\\n" " __FUNCTION__, __LINE__, testrun_errno(), ##__VA_ARGS__)\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "#define testrun_log_success(msg, ...) \\\n" " fprintf(stdout, \"\\t[OK] \\t%s \" msg \"\\n\", __FUNCTION__, ##__VA_ARGS__)\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "#define testrun_log(msg, ...) \\\n" " fprintf(stdout, \"\\t\" msg \"\\n\", ##__VA_ARGS__)\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "#define testrun_log_function_info(msg, ...) 
\\\n" " fprintf(stdout, \"\\t[INFO] \\t%s line:%d message: \" msg \"\\n\", \\\n" " __FUNCTION__, __LINE__, ##__VA_ARGS__)\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "#define testrun_log_clock(start, end) \\\n" " fprintf(stdout, \"\\tClock ticks function: ( %s ) | %f | %.0f ms \\n\",\\\n" " __func__, \\\n" " ((double)(end - start)) / CLOCKS_PER_SEC, \\\n" " (((double)(end - start)) / CLOCKS_PER_SEC ) * 1000)\n" "\n" "/*----------------------------------------------------------------------------\n" " *\n" " * Block of supporting MACROS for assert based testing.\n" " *\n" " * Assert based testing is build around the principle to bundle and\n" " * define some testcases, which will be run in series.\n" " * Within the testcases testrun_assert(), or assert() may be used to\n" " * stop testing.\n" " *\n" " * -----------------------------------------------------------------\n" " *\n" " * Example usage:\n" " *\n" " * int testcase1_function(){\n" " * assert(true);\n" " * return testrun_log_success();\n" " * }\n" " *\n" " * int testcase1_function(){\n" " * testrun_assert(true, \"additional info an failure.\");\n" " * return testrun_log_success();\n" " * }\n" " *\n" " * int testseries() {\n" " *\n" " * testrun_init();\n" " *\n" " * testrun_test(testcase1_function);\n" " * testrun_test(testcase2_function);\n" " *\n" " * return testrun_counter;\n" " * }\n" " *\n" " * testrun_run(testseries);\n" " *\n" " *----------------------------------------------------------------------------*/\n" "\n" "#define testrun_init() \\\n" " int result = 0; \\\n" " int testrun_counter = 0;\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "/**\n" " Wrapper around assert, which adds a message level to assert, to provide\n" " additional and related information e.g. a failure description.\n" "\n" " @param test an actual test case e.g. (1 == 0)\n" " @param message additional message to log e.g. 
\"Failure: 1 is not one\"\n" "*/\n" "#define testrun_assert(test, ... )\\\n" " if (!(test)) { testrun_log_error(__VA_ARGS__); assert(test); }\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "/**\n" " Run a single test (execute a function pointer. Runs a test function.\n" " On non negative return value of the function run, a testrun_counter\n" " is increased, on negative result, the negative result will be returned.\n" "\n" " @param test function pointer of the test to run\n" " @NOTE The surrounding block is left on negative result of the\n" " function pointer execution.\n" "*/\n" "#define testrun_test(test)\\\n" " result = test(); testrun_counter++; if (result < 0) return result;\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "/**\n" " Runs a function pointer, which SHALL contain the test function pointers\n" " to run. The function pointer is wrapped in a main procedure, which and\n" " allows indepentent testruns of the input testcluster over external\n" " execution.\n" "\n" " A clock will be started, as soon as the main is executed and the the\n" " time is stopped again, at the end of the execution. The difference\n" " will be printed and is the runtime of the whole input testcluster.\n" "\n" " A run will fail, as soon as one of the tests in the testcluster fails.\n" " (Fail on first) or will run all functions dependent on the testsetup.\n" "\n" " @param testcluster function pointer to be executed.\n" "*/\n" "#define testrun_run(testcluster) int main(int argc, char *argv[]) {\\\n" " argc = argc;\\\n" " clock_t start1_t, end1_t; \\\n" " start1_t = clock(); \\\n" " testrun_log(\"\\ntestrun\\t%s\", argv[0]);\\\n" " int64_t result = testcluster();\\\n" " if (result > 0) \\\n" " testrun_log(\"ALL TESTS RUN (%jd tests)\", result);\\\n" " end1_t = clock(); \\\n" " testrun_log_clock(start1_t, end1_t); \\\n" " testrun_log(\"\");\\\n" " result >= 0 ? 
exit(EXIT_SUCCESS) : exit(EXIT_FAILURE); \\\n" "}\n" "\n" "/*----------------------------------------------------------------------------\n" " *\n" " * Block of supporting MACROS an inline functions for sequntial and\n" " * parallel testing. Most of the functionality is realted to configure\n" " * testseries for parallel and/or sequential runs. Which functions may\n" " * be run as parallel tests or sequential tests, is up to the test\n" " * developer.\n" " *\n" " * This type of testing is highly customizable and may be adapted\n" " * and customized by each test module implementation.\n" " *\n" " * -----------------------------------------------------------------\n" " *\n" " * An implementation MUST to support the testrun_fun_tests() function\n" " * is the implementation of the configure functions. These functions\n" " * define, which testseries may be run in parallel and which sequential.\n" " *\n" " * bool testrun_configure_parallel(\n" " * int (*testcases[])(),\n" " * size_t * const start,\n" " * size_t const * const max);\n" " *\n" " * as well as\n" " *\n" " * bool testrun_configure_sequential(\n" " * int (*testcases[])(),\n" " * size_t * const start,\n" " * size_t const * const max);\n" " *\n" " * -----------------------------------------------------------------\n" " *\n" " * Example usage:\n" " *\n" " * int testcase1_function(){\n" " * testrun(true);\n" " * return testrun_log_success();\n" " * }\n" " *\n" " * int testcase1_function(){\n" " * testrun(true, \"additional info an failure.\");\n" " * return testrun_log_success();\n" " * }\n" " *\n" " * int64_t testseries(int(*tests[])(), size_t slot, size_t max) {\n" " *\n" " * testrun_init();\n" " *\n" " * testrun_add(testcase1_function);\n" " * testrun_add(testcase2_function);\n" " *\n" " * return testrun_counter;\n" " * }\n" " *\n" " * -----------------------------------------------------------------\n" " *\n" " * NOTE: Here we configure a testseries to be run sequential and parallel\n" " *\n" " * bool 
testrun_configure_parallel(\n" " * int (*testcases[])(),\n" " * size_t * const start,\n" " * size_t const * const max){\n" " *\n" " * if (testrun_add_testcases(testcases,start,end,testseries) < 0)\n" " * return false;\n" " *\n" " * return true;\n" " *\n" " * bool testrun_configure_sequential(\n" " * int (*testcases[])(),\n" " * size_t * const start,\n" " * size_t const * const max){\n" " *\n" " * if (testrun_add_testcases(testcases,start,end,testseries) < 0)\n" " * return false;\n" " *\n" " * return true;\n" " *\n" " * -----------------------------------------------------------------\n" " *\n" " * NOTE: This last function definition is needed to configure the\n" " * maximum amount of parallel and sequential tests as parameters\n" " * instead of a predefinition.\n" " *\n" " * int64_t run_tests(){\n" " * return testrun_run_tests(1000,1000,false);\n" " * }\n" " *\n" " * testrun_run(run_tests);\n" " *\n" " *----------------------------------------------------------------------------*/\n" "\n" "/**\n" " MUST be implemented to configure parallel tests.\n" "\n" " @param testcases array of function pointers\n" " @param start first slot the be used in testcases\n" " @param max maximum slots of testcases (last slot to be set)\n" " @returns true on success, false on errror\n" "*/\n" "bool testrun_configure_parallel(\n" " int (*testcases[])(),\n" " size_t * const start,\n" " size_t const * const max);\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "/**\n" " MUST be implemented to configure sequential tests.\n" "\n" " @param testcases array of function pointers\n" " @param start first slot the be used in testcases\n" " @param max maximum slots of testcases (last slot to be set)\n" " @returns true on success, false on errror\n" "*/\n" "bool testrun_configure_sequential(\n" " int (*testcases[])(),\n" " size_t * const start,\n" " size_t const * const max);\n" "\n" 
"/*----------------------------------------------------------------------------*/\n" "\n" "/**\n" " Run a single atomar test. Return the surrounding block on error.\n" " This function will leave the context block running on error. The\n" " Mindset is a defused assert. LEAVE THE FUNCTION NOT THE PROGRAM.\n" "\n" " @param test Boolean decision input.\n" "*/\n" "#define testrun_check(test, ... )\\\n" " if (!(test)) { testrun_log_error(__VA_ARGS__); return -1;}\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "/**\n" " Alias to @see testrun_check.\n" "*/\n" "#define testrun(test, ...)\\\n" " testrun_check(test, __VA_ARGS__ )\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "/**\n" " Add a new test to the tests array. This is a convinience function\n" " to add a function pointer to the array tests[]. This MACRO uses\n" " block variables **slot**, **testrun_counter**, **max** and **tests[]**.\n" "\n" " @param test function pointer to a new test to be added.\n" "*/\n" "#define testrun_add(test) \\\n" " if (slot + testrun_counter == max) { \\\n" " testrun_log_failure(\"All test slots filled, \" \\\n" " \"check config TESTS[MAX].\"); \\\n" " if (testrun_counter == 0) \\\n" " return -1; \\\n" " return -testrun_counter; \\\n" " } else { \\\n" " tests[slot + testrun_counter] = test; \\\n" " testrun_counter++; \\\n" " }\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "/**\n" " Array initialization to point to NULL.\n" "\n" " @param array array to be initialized\n" " @param start first item to set to NULL\n" " @param end last item to set to NULL\n" "*/\n" "#define testrun_init_testcases(array, start, end, ...) 
\\\n" " for (size_t i = start; i < end; i++ ) { array[i] = NULL; }\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "/**\n" " Add some test cases to a testcase function pointer array, using\n" " a user provided function to add the testcases.\n" "\n" " Function will log the result of testcases added.\n" "\n" " @param tests pointer to function pointer array\n" " @param last pointer to counter of last set item\n" " @param max pointer to value of max items\n" " @param function function to add the tests to the array\n" "\n" " @returns negative count of testcases to add\n" " positive count of added testcases\n" " */\n" "static inline int64_t testrun_add_testcases(\n" " int (*tests[])(),\n" " size_t * const last,\n" " size_t const * const max,\n" " int64_t (*function)(int (*tests[])(), size_t, size_t)){\n" "\n" " if (!tests || !function || !last || !max)\n" " return -1;\n" "\n" " if (*last > *max)\n" " return -1;\n" "\n" " int64_t r = 0;\n" "\n" " r = function(tests, *last, *max);\n" "\n" " if (r < 0) {\n" "\n" " // reinit all from last to end to NULL\n" " testrun_init_testcases(tests, *last, *max);\n" "\n" " testrun_log_failure(\n" " \"Failed to add tests to TESTS[] \"\n" " \"(usage %jd/%jd)\",\n" " *last, *max);\n" "\n" " return -1;\n" "\n" " } else {\n" "\n" " *last += r;\n" " testrun_log_function_info(\n" " \"added %jd tests to TESTS[]\"\n" " \"(usage %jd/%jd)\",\n" " r, *last, *max);\n" " }\n" "\n" " return r;\n" "\n" "}\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "/**\n" " Dumb the test cases to stdout.\n" "\n" " To enable a backtrace with names, the file MUST be compiled with\n" " MODCFLAGS += -rdynamic\n" "\n" " @param function pointer to function pointer array\n" " @param items amount of items in functions\n" " @param names bool to try to backtrace names\n" " @returns negative count of failed tests\n" " positive count of run tests otherwise\n" " */\n" "static 
inline bool testrun_dump_testcases(\n" " int (*functions[])(),\n" " size_t max,\n" " bool names) {\n" "\n" " if (!functions || max < 1)\n" " return false;\n" "\n" " void *pointer = NULL;\n" "\n" " // dump is formated to fit to standard header log and to dump 20 digits\n" " fprintf(stdout, \"\\t[DUMP]\\ttestcases tests[%jd]\\n\", max);\n" " if (names){\n" " #if defined(__GLIBC__)\n" " fprintf(stdout, \"\\t[DUMP]\\t ... try to backtrace\\n\");\n" " #else\n" " fprintf(stdout, \"\\t[DUMP]\\t ... names not implemented\\n\");\n" " #endif\n" " }\n" "\n" " for (size_t i = 0; i < max; i++) {\n" "\n" " pointer = (void*) functions[i];\n" "\n" " if (names) {\n" " #if defined(__GLIBC__)\n" " backtrace_symbols_fd(&pointer, 1, STDOUT_FILENO);\n" " #else\n" " // fallback to printf\n" " fprintf(stdout, \"%20jd %p \\n\", i, pointer);\n" " #endif\n" " } else {\n" " fprintf(stdout, \" %20jd %p \\n\", i, pointer);\n" " }\n" "\n" " }\n" "\n" " return true;\n" "}\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "/**\n" " Run a bunch of tests in parallel. 
This will run all configured\n" " tests independently and return the result of the test batch,\n" " once all tests are done.\n" "\n" " A clock of the batch runtime will be logged in addition to the\n" " result of the testrun.\n" "\n" " @param function pointer to function pointer array\n" " @param items amount of items in functions\n" " @returns negative count of failed tests\n" " positive count of run tests otherwise\n" " */\n" "static inline int64_t testrun_parallel(\n" " int (*functions[])(),\n" " size_t items) {\n" "\n" " if (!functions || items < 1)\n" " return 0;\n" "\n" " if (items > INT64_MAX )\n" " return 0;\n" "\n" " int64_t c_OK = 0;\n" " int64_t c_NOK = 0;\n" "\n" " clock_t start, end;\n" " start = clock();\n" "\n" " int nthreads = 0, tid = 0;\n" "\n" "\n" " /*\n" " * Use this if you want to reduce or set the number of threads\n" " *\n" " * omp_set_dynamic(0);\n" " * omp_set_num_threads(1);\n" " */\n" "\n" " #pragma omp parallel for\n" " for (size_t i = 0; i < items; i++){\n" "\n" " if (nthreads == 0){\n" " tid = omp_get_thread_num();\n" " if (tid == 0)\n" " nthreads = omp_get_num_threads();\n" " }\n" "\n" " if (functions[i] != 0) {\n" "\n" " if (functions[i]() < 0){\n" " #pragma omp atomic\n" " c_NOK++;\n" " } else {\n" " #pragma omp atomic\n" " c_OK++;\n" " }\n" " }\n" " }\n" "\n" " testrun_log(\"---------------------------------------------------------\");\n" " testrun_log(\"NOTE PARALLEL TESTING\");\n" " testrun_log(\"\");\n" " testrun_log(\"This version is using OpenMP. 
Using GCC for compilation \");\n" " testrun_log(\"may produce false valgrind output due to use of libomp.\");\n" " testrun_log(\"More information is included in docs/valgrind/openMP.\");\n" " testrun_log(\"---------------------------------------------------------\");\n" "\n" "\n" " testrun_log(\"Parallel RUN (%jd) TESTS in %d threads: \"\n" " \"success %jd error %jd)\",\n" " c_OK + c_NOK, nthreads,\n" " c_OK, c_NOK);\n" "\n" " end = clock();\n" " testrun_log_clock(start, end);\n" " testrun_log(\"\");\n" "\n" " if (c_NOK > 0)\n" " return -c_NOK;\n" "\n" " return c_OK;\n" "}\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "/**\n" " Run a bunch of tests serial. This will run all configured\n" " tests independently and return the result of the test batch,\n" " once all tests are done or the first tests fails, if break_on_error\n" " is set.\n" "\n" " A clock of the batch runtime will be logged in addition to the\n" " result of the testrun.\n" "\n" " @param function pointer to function pointer array\n" " @param items amount of items in function\n" " @param break_on_error (true) fail test batch on first error\n" " (false) run all tests before error return\n" " @returns negative count of failed tests\n" " positive count of run tests otherwise\n" " */\n" "static inline int64_t testrun_sequential(\n" " int (*functions[])(),\n" " size_t items,\n" " bool break_on_error) {\n" "\n" " if (!functions || items < 1)\n" " return 0;\n" "\n" " if (items > INT64_MAX )\n" " return 0;\n" "\n" " int64_t c_OK = 0;\n" " int64_t c_NOK = 0;\n" "\n" " clock_t start, end;\n" " start = clock();\n" "\n" " for (size_t i = 0; i < items; i++){\n" "\n" " if (functions[i] != 0) {\n" "\n" " if (functions[i]() < 0) {\n" "\n" " c_NOK++;\n" " if (break_on_error)\n" " break;\n" "\n" " } else {\n" "\n" " c_OK++;\n" "\n" " }\n" " }\n" " }\n" "\n" " testrun_log(\"Serial RUN (%jd) TESTS: success %jd error %jd)\",\n" " c_OK + c_NOK,\n" " c_OK, c_NOK);\n" "\n" " 
end = clock();\n" " testrun_log_clock(start, end);\n" " testrun_log(\"\");\n" "\n" " if (c_NOK > 0)\n" " return -c_NOK;\n" "\n" " return c_OK;\n" "}\n" "\n" "/*----------------------------------------------------------------------------*/\n" "\n" "/**\n" " Run a bunch of configurable parallel and sequential tests serial.\n" "\n" " @param max_parallel maximum test cases parallel\n" " @param max_sequential maximum test cases sequential\n" " @param break_on_error (true) fail sequential test batch on first error\n" " (false) run all sequential tests\n" " @returns negative count of run tests cased on error\n" " positive count of run tests\n" " */\n" "static inline int64_t testrun_run_tests(\n" " size_t max_parallel,\n" " size_t max_sequential,\n" " bool break_on_error) {\n" "\n" " int64_t result_parallel = 0;\n" " int64_t result_sequential = 0;\n" " size_t counter_parallel = 0;\n" " size_t counter_sequential = 0;\n" "\n" " if ( (max_parallel == 0) && (max_sequential == 0))\n" " return -1;\n" "\n" " // LOAD & RUN test cases\n" "\n" " if (max_parallel > 0) {\n" "\n" " int (*testcases[max_parallel])();\n" " testrun_init_testcases(testcases, 0, max_parallel);\n" "\n" " if (!testrun_configure_parallel(\n" " testcases, &counter_parallel, &max_parallel)){\n" " testrun_log_failure(\"Failure configure parallel.\");\n" " return -1;\n" " }\n" "\n" " result_parallel = testrun_parallel(testcases, counter_parallel);\n" "\n" " if (result_parallel < 0)\n" " testrun_log(\"Failure testrun parallel run\");\n" "\n" " }\n" "\n" " if (max_sequential > 0) {\n" "\n" " int (*testcases[max_sequential])();\n" " testrun_init_testcases(testcases, 0, max_sequential);\n" "\n" " if (!testrun_configure_sequential(\n" " testcases, &counter_sequential, &max_sequential)){\n" " testrun_log_failure(\"Failure configure sequential.\");\n" " return -1;\n" " }\n" "\n" " result_sequential = testrun_sequential(\n" " testcases, counter_sequential, break_on_error);\n" "\n" " if (result_sequential < 0)\n" " 
testrun_log(\"Failure testrun sequential run\");\n" "\n" " }\n" "\n" " if ( (result_parallel < 0) || (result_sequential < 0)) {\n" " if ( (counter_parallel + counter_sequential) == 0)\n" " return -1;\n" " return ( -1 * (counter_parallel + counter_sequential));\n" " }\n" "\n" " return (counter_parallel + counter_sequential);\n" "}\n" "\n" "/** -----------------------------------------------------------------------\n" "\n" " @example testrun_assert_example.c\n" " @author Markus Toepfer\n" " @date 2017-10-31\n" "\n" " @brief Example test file using testrun.h\n" "\n" " This example shows assert() style based testing with testrun.h and is\n" " build around the testrun_test() macro, which increases a counter which\n" " MUST be initialized in a testcluster function.\n" "\n" " -----------------------------------------------------------------------\n" "\n" " @code\n" " #include \"../tools/testrun_parallel.h\"\n" "\n" " bool example_function() {\n" " return true;\n" " }\n" " -----------------------------------------------------------------------\n" "\n" " int test_with_assert_function() {\n" "\n" " // Fail on first testing\n" " //\n" " // Fail on first can be implemented using assert,\n" " // or by returning a negative result of the testrun_test\n" " // The following examples do all the same, the will stop\n" " // the whole testrun and report a failure.\n" "\n" " testrun_assert(\n" " example_function() == true, \\\n" " \"Failure: NOK result is true.\"\n" " );\n" "\n" " assert(true == example_function());\n" " assert(example_function());\n" "\n" " if (!example_function())\n" " return -1;\n" "\n" " // will not be reached in case of error\n" " return testrun_log_success();\n" " }\n" "\n" " -----------------------------------------------------------------------\n" "\n" " int test_whatever_OK() {\n" "\n" " bool failure = false;\n" "\n" " // Positive result logging\n" "\n" " if (!failure)\n" " return testrun_log_success();\n" "\n" " // will be reached in case of error\n" " return 
testrun_log_error();\n" " }\n" "\n" " -----------------------------------------------------------------------\n" "\n" " int test_whatever_NOK() {\n" "\n" " // Failure logging (Don't fail the testrun, just log a failure)\n" "\n" " if (failure)\n" " return testrun_log_error();\n" "\n" " // will not be reached in case of error\n" " return testrun_log_success();\n" "\n" " }\n" "\n" " -----------------------------------------------------------------------\n" "\n" " int assert_based_testing() {\n" "\n" " testrun_init();\n" "\n" " testrun_test(test_with_assert_function);\n" " testrun_test(test_whatever_OK);\n" " testrun_test(test_whatever_NOK);\n" "\n" " return testrun_counter;\n" "\n" " }\n" "\n" " testrun_run(assert_based_testing);\n" " @endcode\n" "\n" "**/\n" "/** -----------------------------------------------------------------------\n" "\n" " @example testrun_example.c\n" " @author Markus Toepfer\n" " @date 2017-11-22\n" "\n" " @brief Example test file using testrun.h\n" "\n" " This example shows parallel and sequential style based testing\n" " with testrun.h and is build around a MACRO set to execute tests in\n" " parallel or seqentuial run.\n" "\n" " //---------------------------------------------------------------------\n" "\n" " @code\n" " #include \"../tools/testrun_parallel.h\"\n" "\n" " bool example_function() {\n" " return true;\n" " }\n" "\n" " //---------------------------------------------------------------------\n" "\n" " int testcase_block1(){\n" "\n" " testrun(example_function());\n" " testrun(true);\n" " testrun(example_function(), \"second run of function.\");\n" "\n" " return testrun_log_success();\n" " }\n" "\n" " //---------------------------------------------------------------------\n" "\n" " int testcase_block2(){\n" "\n" " return testrun_log_success();\n" " }\n" "\n" " //---------------------------------------------------------------------\n" "\n" " int testcase_block3(){\n" "\n" " return testrun_log_success();\n" " }\n" "\n" " 
//---------------------------------------------------------------------\n" "\n" " Int testcase_block4(){\n" "\n" " return testrun_log_success();\n" " }\n" "\n" " //---------------------------------------------------------------------\n" "\n" " int64_t cluster_tests1(int(*tests[])(), size_t slot, size_t max) {\n" "\n" " testrun_init(); // create local variables\n" " testrun_add(testcase_block1); // adds block1 to tests[]\n" " testrun_add(testcase_block2); // adds block2 to tests[]\n" "\n" " return testrun_counter;\n" " }\n" "\n" " //---------------------------------------------------------------------\n" "\n" " int64_t cluster_tests2(int(*tests[])(), size_t slot, size_t max) {\n" "\n" " testrun_init(); // create local variables\n" " testrun_add(testcase_block3); // adds block3 to tests[]\n" " testrun_add(testcase_block4); // adds block4 to tests[]\n" "\n" " return testrun_counter;\n" " }\n" "\n" " //---------------------------------------------------------------------\n" "\n" " bool testrun_configure_parallel(\n" " int (*testcases[])(),\n" " size_t * const start,\n" " size_t const * const max){\n" "\n" " if (!testcases || !start || !max)\n" " return false;\n" "\n" " if(testrun_add_testcases(\n" " testcases,start, max, cluster_tests1) < 0)\n" " return false;\n" "\n" " return true;\n" " }\n" "\n" " //---------------------------------------------------------------------\n" "\n" "\n" " bool testrun_configure_sequential(\n" " int (*testcases[])(),\n" " size_t *const start,\n" " size_t const * const max){\n" "\n" " if (!testcases || !start || !max)\n" " return false;\n" "\n" " if(testrun_add_testcases(\n" " testcases,start, max, cluster_tests1) < 0)\n" " return false;\n" "\n" " if(testrun_add_testcases(\n" " testcases,start, max, cluster_tests2) < 0)\n" " return false;\n" "\n" " return true;\n" "\n" " }\n" "\n" " //---------------------------------------------------------------------\n" "\n" " int64_t run_tests() {\n" "\n" " return testrun_run_tests(1000,1000,false);\n" " 
}\n" "\n" " testrun_run(run_tests);\n" " @endcode\n" "\n" "**/\n" "\n" "#endif /* testrun_openmp_h */\n";

/*
 *      ------------------------------------------------------------------------
 *
 *      Gitignore file #GITIGNORE
 *
 *      This constant string is used to generate the default .gitignore
 *      content for a new project (object files, build output, IDE and
 *      editor residue, generated docs).
 *
 *      ------------------------------------------------------------------------
 */
static const char *testrun_gitignore =
        "# Prerequisites\n"
        "*.d\n"
        "\n"
        "# Object files\n"
        "*.o\n"
        "*.so\n"
        "*.ko\n"
        "*.obj\n"
        "*.elf\n"
        "\n"
        "# Linker output\n"
        "*.ilk\n"
        "*.map\n"
        "*.exp\n"
        "\n"
        "# Precompiled Headers\n"
        "*.gch\n"
        "*.pch\n"
        "\n"
        "# Libraries\n"
        "*.lib\n"
        "*.a\n"
        "*.la\n"
        "*.lo\n"
        "\n"
        "# Shared objects (inc. Windows DLLs)\n"
        "*.dll\n"
        "*.so\n"
        "*.so.*\n"
        "*.dylib\n"
        "\n"
        "# Executables\n"
        "*.exe\n"
        "*.out\n"
        "*.app\n"
        "*.i*86\n"
        "*.x86_64\n"
        "*.hex\n"
        "\n"
        "# Debug files\n"
        "*.dSYM/\n"
        "*.su\n"
        "*.idb\n"
        "*.pdb\n"
        "\n"
        "# Kernel Module Compile Results\n"
        "*.mod*\n"
        "*.cmd\n"
        ".tmp_versions/\n"
        "modules.order\n"
        "Module.symvers\n"
        "Mkfile.old\n"
        "dkms.conf\n"
        "\n"
        "# Local files\n"
        "**/local\n"
        "**/bin/\n"
        "**/gen/\n"
        "**/build/\n"
        "**/docs/doxygen/\n"
        "**/doxygen/documentation/\n"
        "\n"
        "# vagrant (if used)\n"
        ".vagrant\n"
        "\n"
        "# subprojects (if used)\n"
        "*.git\n"
        "\n"
        "# generated package config\n"
        "*.pc\n"
        "\n"
        "# ctags\n"
        ".tags\n"
        "tags\n"
        "functions\n"
        "\n"
        "# IDE\n"
        "\n"
        "## IntelliJ\n"
        ".idea\n"
        "\n"
        "## Sublime\n"
        "*.sublime-workspace\n"
        "*.sublime-project\n"
        "\n"
        "## VIM\n"
        "[._]*.s[a-w][a-z]\n"
        "[._]s[a-w][a-z]\n"
        "*.un~\n"
        "Session.vim\n"
        ".netrwhist\n"
        "*~\n";

/*----------------------------------------------------------------------------*/

/**
        Return a heap-allocated copy of the plain testrun.h header template
        (testrun_header, defined earlier in this file).

        @returns        allocated string (caller must free), or NULL on
                        allocation failure (strdup semantics)
*/
char *testrun_generate_header(){
        return strdup(testrun_header);
}

/*----------------------------------------------------------------------------*/

/**
        Return a heap-allocated copy of the OpenMP-enabled testrun header
        template (testrun_header_openmp, defined above).

        @returns        allocated string (caller must free), or NULL on
                        allocation failure (strdup semantics)
*/
char *testrun_generate_header_openmp(){
        return strdup(testrun_header_openmp);
}

/*----------------------------------------------------------------------------*/

char
*testrun_generate_gitignore(){
        /* Return a heap-allocated copy of the default .gitignore template
         * (testrun_gitignore, defined above). Caller must free; NULL on
         * allocation failure (strdup semantics). */
        return strdup(testrun_gitignore);
}

/*----------------------------------------------------------------------------*/

/**
        Generate the content of a project README.md from a fixed markdown
        template, filling in project name, description and copyright.

        @param projectname          project name (also used for the
                                    include/src/unit file names in the
                                    structure listing)
        @param description          text for the "## Description" section
        @param copyright_string     text for the "## Copyright" section
        @returns                    allocated string (caller must free)

        NOTE(review): unlike the other generators in this file, the snprintf
        return value is not checked here, and the parameters are not
        NULL-checked — a template expansion >= 5000 bytes is silently
        truncated. Confirm whether that is intentional.
*/
char *testrun_generate_readme(
        const char *projectname,
        const char *description,
        const char *copyright_string){

        size_t size = 5000;
        char buffer[size];
        memset(buffer, 0, size);

        snprintf(buffer, size,
        "# Project %s\n"
        "\n"
        "This module is self supported and may be build, tested, installed and\n"
        "run independently.\n"
        "\n"
        "## Overview\n"
        "\n"
        "* [Description](#description)\n"
        "* [Usage](#usage)\n"
        "* [Installation](#installation)\n"
        "* [Requirements](#requirements)\n"
        "* [Structure](#structure)\n"
        "* [Tests](#tests)\n"
        "* [Tips](#tips)\n"
        "* [Copyright](#copyright)\n"
        "\n"
        "## Description\n"
        "\n"
        "%s\n"
        "\n"
        "## Usage\n"
        "\n"
        "...\n"
        "\n"
        "## Installation\n"
        "\n"
        "...\n"
        ""
        "\n"
        "### build sources\n"
        "\n"
        "\\`\\`\\`bash\n"
        "make\n"
        "\\`\\`\\`\n"
        "\n"
        "### build documentation\n"
        "\n"
        "\\`\\`\\`bash\n"
        "make documentation\n"
        "\\`\\`\\`\n"
        "\n"
        "### test sources\n"
        "\n"
        "\\`\\`\\`bash\n"
        "make tested\n"
        "\\`\\`\\`\n"
        "\n"
        "### install binaries\n"
        "\n"
        "\\`\\`\\`bash\n"
        "sudo make install\n"
        "\\`\\`\\`\n"
        "\n"
        "### uninstall binaries\n"
        "\n"
        "\\`\\`\\`bash\n"
        "sudo make uninstall\n"
        "\\`\\`\\`\n"
        "\n"
        "## Requirements\n"
        "\n"
        "## Structure\n"
        "\n"
        "### Default structure of the folder:\n"
        "\n"
        "\\`\\`\\`\n"
        "<pre>\n"
        ".\n"
        "├── README.MD\n"
        "├── .gitignore\n"
        "├── makefile\n"
        "├── makefile_common.mk\n"
        "│\n"
        "├── copyright\n"
        "│ └── ... 
\n"
        "│\n"
        "├── doxygen\n"
        "│ ├── documentation\n"
        "│ └── doxygen.config\n"
        "│\n"
        "├── docs\n"
        "│ ├── CHANGELOG.MD\n"
        "│ └── ...\n"
        "│\n"
        "├── include\n"
        "│ ├── %s.h\n"
        "│ └── ...\n"
        "│\n"
        "├── src\n"
        "│ ├── %s.c\n"
        "│ └── ...\n"
        "│\n"
        "└── tests\n"
        " ├── resources\n"
        " ├── tools\n"
        " │ ├── testrun.h\n"
        " │ ├── testrun_runner.sh\n"
        " │ ├── testrun_gcov.sh\n"
        " │ ├── testrun_gprof.sh\n"
        " │ ├── testrun_simple_coverage_tests.sh\n"
        " │ ├── testrun_simple_unit_tests.sh\n"
        " │ ├── testrun_simple_acceptance_tests.sh\n"
        " │ └── testrun_simple_loc.sh\n"
        " │\n"
        " ├── acceptance\n"
        " │ ├── ...\n"
        " │ └── ...\n"
        " │\n"
        " └── unit\n"
        " ├── %s_test.c\n"
        " └── ...\n"
        "\n"
        "</pre>\n"
        "\\`\\`\\`\n"
        "\n"
        "## Tests\n"
        "\n"
        "All test sources will be recompiled on each make run. That means,\n"
        "all module tests will be created new on any change in any source file.\n"
        "\n"
        "### Test a project (all files contained in tests/unit)\n"
        "\n"
        "Test compile and run\n"
        "~~~\n"
        "make tested\n"
        "~~~\n"
        "\n"
        "## Tips\n"
        "\n"
        "## Copyright\n"
        "\n"
        "%s\n",
        /* argument order matches the %s placeholders above:
         * title, description, include/, src/, tests/unit/, copyright */
        projectname,
        description,
        projectname,
        projectname,
        projectname,
        copyright_string);

        return strdup(buffer);
}

/*----------------------------------------------------------------------------*/

/**
        Generate the content of a doxygen configuration file for the project.

        @param project_name     doxygen PROJECT_NAME and PROJECT_BRIEF
        @param path_doxygen     folder holding logo and documentation output
        @param path_mainfile    file used as USE_MDFILE_AS_MAINPAGE
        @param input            doxygen INPUT path(s)
        @returns                allocated string (caller must free),
                                NULL on snprintf error
*/
char *testrun_generate_doxygen(
        const char *project_name,
        const char *path_doxygen,
        const char *path_mainfile,
        const char *input){

        size_t size = 5000;
        char buffer[size];
        memset(buffer, 0, size);

        if (snprintf(buffer, size,
        "DOXYFILE_ENCODING = UTF-8\n"
        "PROJECT_NAME = %s\n"
        "PROJECT_NUMBER = 0.0.1\n"
        "PROJECT_LOGO = %s/logo.png\n"
        "PROJECT_BRIEF = %s\n"
        "OUTPUT_DIRECTORY = %s/documentation\n"
        "CREATE_SUBDIRS = NO\n"
        "ALLOW_UNICODE_NAMES = NO\n"
        "OUTPUT_LANGUAGE = English\n"
        "MARKDOWN_SUPPORT = YES\n"
        "AUTOLINK_SUPPORT = YES\n"
        "USE_MDFILE_AS_MAINPAGE = %s\n"
        "INPUT = %s\n"
        "INPUT_ENCODING = UTF-8\n"
        "FILE_PATTERNS = *.h *.c *.js *.py *.sh\n"
        "RECURSIVE = YES\n"
        "EXCLUDE_SYMLINKS = YES\n",
        project_name,
        path_doxygen,
        project_name,
        path_doxygen,
        path_mainfile, 
input)< 0)
                return NULL;

        return strdup(buffer);
}

/*----------------------------------------------------------------------------*/

/**
        Generate the content of a systemd service unit file for the project.

        @param project_name     name used in the unit Description
        @param install_path     binary path used as ExecStart
        @returns                allocated string (caller must free),
                                NULL on snprintf error

        NOTE(review): parameters are not NULL-checked here (unlike the
        script generators below) — confirm callers guarantee non-NULL.
*/
char *testrun_generate_service_file(
        const char *project_name,
        const char *install_path){

        size_t size = 5000;
        char buffer[size];
        memset(buffer, 0, size);

        if (snprintf(buffer, size,
        "[Unit]\n"
        "Description= %s service\n"
        "\n"
        "[Service]\n"
        "ExecStart=%s\n"
        "NonBlocking=True\n"
        "\n"
        "[Install]\n"
        "WantedBy=multi-user.target\n"
        , project_name, install_path)< 0)
                return NULL;

        return strdup(buffer);
}

/*----------------------------------------------------------------------------*/

/**
        Generate the content of a systemd socket unit file for the project.
        The [Socket] section is emitted with commented-out listen examples
        for the user to enable.

        @param project_name     name used in the unit Description
        @returns                allocated string (caller must free),
                                NULL on snprintf error
*/
char *testrun_generate_socket_file(
        const char *project_name){

        size_t size = 5000;
        char buffer[size];
        memset(buffer, 0, size);

        if (snprintf(buffer, size,
        "[Unit]\n"
        "Description= %s socket\n"
        "\n"
        "[Socket]\n"
        "\n"
        "# example interface bound\n"
        "# ListenStream=10.0.0.100:12345\n"
        "\n"
        "# example localhost\n"
        "# ListenStream=127.0.0.1:12345\n"
        "\n"
        "# example listen on all\n"
        "# ListenStream=0.0.0.0:12345\n"
        "\n"
        "# example listen on UDP\n"
        "# ListenDatagram=0.0.0.0:12345\n"
        "\n"
        "# Maximum parallel connections for the socket\n"
        "Backlog=2048\n"
        "\n"
        "# TCP Keepalive (1h)\n"
        "KeepAlive=false\n"
        "\n"
        "[Install]\n"
        "WantedBy=multi-user.target\n"
        , project_name)< 0)
                return NULL;

        return strdup(buffer);
}

/*----------------------------------------------------------------------------*/

/**
        Generate the content of a "simple tests" shell script, which runs
        all *.test executables of a folder (fail on first error) via the
        runner script and writes a timestamped logfile.

        @param type             test type label (e.g. unit, acceptance)
        @param project          project name for the script header
        @param file_name        name of the generated script file
        @param runner_script    name of the runner script to invoke
        @param path_logfile     folder for logfiles
        @param path_tests       folder containing the *.test executables
        @param path_tools       folder containing the runner script
        @returns                allocated string (caller must free),
                                NULL on error or snprintf failure
*/
char *testrun_generate_script_simple_tests(
        const char *type,
        const char *project,
        const char *file_name,
        const char *runner_script,
        const char *path_logfile,
        const char *path_tests,
        const char *path_tools
        ){

        if ( !type || !project || !file_name || !runner_script ||
             !path_logfile || !path_tests || !path_tools)
                return NULL;

        size_t size = 5000;
        char buffer[size];
        memset(buffer, 0, size);

        if (0 > snprintf(buffer, size,
        "%s"
        "#\n"
        "# File %s\n"
        "# Authors Markus Toepfer\n"
        "# Date 2017-11-30\n"
        "#\n"
        "# Project %s\n"
        "#\n"
        "# Description Run all test executables [PATH_TESTS]/*.test\n"
        "# Run the 
whole folder, until an error occurs.\n" "#\n" "# MODE FAIL ON ERROR (Fail on first test error)\n" "#\n" "# LOGFILE [PATH_LOGFILE]/%s.<time>.log\n" "#\n" "#\n" "# Usage ./%s /path/to/project\n" "#\n" "# Dependencies bash, touch, chmod, ls, wc, date\n" "#\n" "# Last changed 2018-07-11\n" "# ------------------------------------------------------------------------\n" "\n" "TEST_TYPE=\"%s\"\n" "FOLDER_LOGFILE=\"%s\"\n" "FOLDER_TESTS=\"%s\"\n" "RUNNER_SCRIPT=\"./%s/%s\"\n" "\n" "echo \"-------------------------------------------------------\"\n" "echo \" SIMPLE $TEST_TYPE TESTING\"\n" "echo \"-------------------------------------------------------\"\n" "\n" "start_time=$(date \"+%%Y.%%m.%%d-%%H.%%M.%%S.%%N\")\n" "\n" "# SET A LOGFILE\n" "LOGFILE=\"$FOLDER_LOGFILE/$TEST_TYPE.$start_time.log\"\n" "echo \" (log) $start_time\" > $LOGFILE\n" "touch $LOGFILE\n" "chmod a+w $LOGFILE\n" "\n" "echo \"-------------------------------------------------------\" >> $LOGFILE\n" "echo \" REPORT $TEST_TYPE TESTING\" >> $LOGFILE\n" "echo \"-------------------------------------------------------\" >> $LOGFILE\n" "\n" "# RUN THE RUNNER\n" "sh $RUNNER_SCRIPT $LOGFILE $FOLDER_TESTS FAIL_ON_ERROR\n" "RESULT=$?\n" "\n" "end_time=$(date \"+%%Y.%%m.%%d-%%H.%%M.%%S.%%N\")\n" "\n" "# FINISH THE REPORT\n" "echo \"-------------------------------------------------------\" >> $LOGFILE\n" "echo \"DONE \\t $TEST_TYPE TEST RUN\" >> $LOGFILE\n" "if [ $RESULT -eq 0 ]; then\n" " echo \"RESULT\\t SUCCESS\" >> $LOGFILE\n" "else\n" " echo \"RESULT\\t FAILURE\" >> $LOGFILE\n" "fi\n" "echo \"START \\t $start_time\" >> $LOGFILE\n" "echo \"END \\t $end_time\" >> $LOGFILE\n" "echo \"-------------------------------------------------------\" >> $LOGFILE\n" "\n" "# DUMP THE REPORT ON SUCCESS\n" "if [ $RESULT -eq 0 ]; then\n" " cat $LOGFILE\n" " echo \"\"\n" "else\n" " echo \"\"\n" " echo \"$TEST_TYPE TEST FAILED\"\n" " echo \"Logfile dump stopped to point to last error.\"\n" "fi\n" "exit $RESULT\n", bash_header, 
file_name, project, type, file_name, type, path_logfile, path_tests, path_tools, runner_script )) return NULL; return strdup(buffer); } /*----------------------------------------------------------------------------*/ char *testrun_generate_script_runner( const char *project, const char *file_name){ if ( !project || !file_name) return NULL; size_t size = 5000; char buffer[size]; memset(buffer, 0, size); if (0 > snprintf(buffer, size, "%s" "#\n" "# File %s\n" "# Authors Markus Toepfer\n" "# Date 2017-11-30\n" "#\n" "# Project %s\n" "#\n" "# Description Run each TEST.test of a folder and log Ok or NOK\n" "# for each executed testfile of the folder.\n" "#\n" "# EXAMPLE OUTPUT\n" "#\n" "# [OK] 1/5 filename1.test\n" "# [NOK] 2/5 filename2.test\n" "#\n" "# MODES\n" "#\n" "# (1) RUN ALL TESTS (log but ignore errors)\n" "# use script with 2 parameters\n" "# e.g. ./testrun_runner.sh logfile /path\n" "#\n" "# This mode will not return a test failure and\n" "# may be used to run all tests and return success\n" "# if all tests was run. (test results are logged)\n" "#\n" "# (2) FAIL ON ERROR (Fail on first error)\n" "# use script with 3 parameters\n" "# e.g. ./testrun_runner.sh logfile /path 1\n" "#\n" "# This mode returns -1 on the first test failure.\n" "#\n" "# PARAMETER\n" "#\n" "# (1) path to logfile destination\n" "# (2) path to folder with test cases\n" "#\n" "#\n" "# Usage ./testrun_runner.sh /path/to/logfile /path/to/test/dir\n" "#\n" "# Dependencies bash, tail, ls, grep, wc\n" "#\n" "# Last changed 2017-11-30\n" "# ------------------------------------------------------------------------\n" "\n" "if [ -z $1 ]; then\n" " echo \"ERROR ... NO LOGFILE INPUT TO SRCIPT\"\n" " exit 1\n" "fi\n" "LOGFILE=$1\n" "\n" "if [ -z $2 ]; then\n" " echo \"ERROR ... FOLDER INPUT TO SRCIPT\"\n" " exit 1\n" "fi\n" "FOLDER=$2\n" "\n" "FAIL_ON_ERROR=0\n" "if [ ! -z $3 ]; then\n" " FAIL_ON_ERROR=1\n" "fi\n" "\n" "if [ ! -w $LOGFILE ]; then\n" " echo \"ERROR ... 
LOGFILE NOT WRITABLE\"\n" " exit 1\n" "fi\n" "\n" "# ------------------------------------------------------------------------\n" "# PERFORM TESTRUN\n" "# ------------------------------------------------------------------------\n" "\n" "FILES=`ls $FOLDER/ | grep \"\\.test\" | wc -l`\n" "if [ $? -ne 0 ]; then\n" " echo \"ERROR ... could not count files of $FOLDER\"\n" " exit 1\n" "fi\n" "c=0\n" "\n" "if [ $FILES -eq 0 ]; then\n" " exit 0\n" "fi\n" "\n" "for i in $FOLDER/*.test\n" "do\n" " c=$((c+1))\n" "\n" " # RUN EXECUTABLE\n" " $i 2>&1 >> $LOGFILE\n" "\n" " # CHECK RETURN OF EXECUTABLE\n" " if [ $? -ne 0 ]; then\n" "\n" " echo \"NOK\\t(\"$c\"/\"$FILES\")\\t\"$i\n" "\n" " if [ $FAIL_ON_ERROR -eq 1 ]; then\n" " exit 1\n" " fi\n" " else\n" " echo \"OK\\t(\"$c\"/\"$FILES\")\\t\"$i\n" " fi\n" "done\n" "exit 0\n", bash_header, file_name, project)) return NULL; return strdup(buffer); } /*----------------------------------------------------------------------------*/ char *testrun_generate_script_loc( const char *project, const char *file_name, const char *path_header, const char *path_source, const char *path_tests){ if ( !project || !file_name || !path_header || !path_source || !path_tests) return NULL; size_t size = 5000; char buffer[size]; memset(buffer, 0, size); if (0 > snprintf(buffer, size, "%s" "#\n" "# File %s\n" "# Authors Markus Toepfer\n" "# Date 2017-11-30\n" "#\n" "# Project %s\n" "#\n" "# Description Count the lines of header, src and tests.\n" "# This file uses no error checking.\n" "#\n" "# Usage ./%s /path/to/project\n" "#\n" "# Dependencies bash, find, xargs, wc\n" "#\n" "# Last changed 2018-07-11\n" "# ------------------------------------------------------------------------\n" "\n" "FOLDER_INC=\"%s\"\n" "FOLDER_SRC=\"%s\"\n" "FOLDER_TST=\"%s\"\n" "echo \"-------------------------------------------------------\"\n" "echo \" SIMPLE LOC COUNTER\"\n" "echo \"-------------------------------------------------------\"\n" "echo \"\"\n" "echo \"(LOC) 
HEADER\"\n" "find $1/$FOLDER_INC -name '*.h' | xargs wc -l\n" "echo \"\"\n" "echo \"(LOC) SOURCE\"\n" "find $1/$FOLDER_SRC -name '*.c' | xargs wc -l\n" "echo \"\"\n" "echo \"(LOC) TESTS\"\n" "find $1/$FOLDER_TST -name '*.c' | xargs wc -l\n" "echo \"\"\n" "echo \"-------------------------------------------------------\"\n" "echo \"\"\n", bash_header, file_name, project, file_name, path_header, path_source, path_tests )) return NULL; return strdup(buffer); } /*----------------------------------------------------------------------------*/ char *testrun_generate_script_coverage( const char *project, const char *file_name, const char *src_prefix, const char *test_prefix, const char *path_logfile, const char *path_source, const char *path_tests){ if ( !project || !file_name || !test_prefix || !path_logfile || !path_source || !path_tests) return NULL; size_t size = 10000; char buffer[size]; memset(buffer, 0, size); if (!src_prefix) src_prefix = "src_"; if (0 > snprintf(buffer, size, "%s" "#\n" "# File %s\n" "# Authors Markus Toepfer\n" "# Date 2017-11-30\n" "#\n" "# Project %s\n" "#\n" "# Description Count functions of folder src vs unit test functions.\n" "#\n" "# CONVENTION\n" "#\n" "# Each function in any file of the source folder located\n" "# \"%s\"\n" "# will have a corresponding test function,\n" "# using the same name in a file of the unit tests located at\n" "# \"%s\",\n" "# with a function name prefix of\n" "# \"%s\".\n" "#\n" "# EXAMPLE function | test_function\n" "#\n" "# NOTE This simple coverage test just covers the\n" "# observance of the given coding convention.\n" "#\n" "#\n" "# Usage ./%s /path/to/project\n" "#\n" "# Dependencies bash, ctags, awk, sed, grep\n" "#\n" "# Last changed 2018-07-11\n" "# ------------------------------------------------------------------------\n" "\n" "start_time=$(date \"+%%Y.%%m.%%d-%%H.%%M.%%S.%%N\")\n" "SRC_PREFIX=\"%s\"\n" "TEST_PREFIX=\"%s\"\n" "\n" "SRCDIR=\"$1/%s\"\n" "TESTDIR=\"$1/%s\"\n" "FOLDER_LOGFILE=\"$1/%s\"\n" 
"" "\n" "# SET A LOGFILE\n" "LOGFILE=\"$FOLDER_LOGFILE/coverage.$start_time.log\"\n" "touch $LOGFILE\n" "chmod a+w $LOGFILE\n" "\n" "echo \"-------------------------------------------------------\" >> $LOGFILE\n" "echo \" REPORT COVERAGE TESTING\" >> $LOGFILE\n" "echo \"-------------------------------------------------------\" >> $LOGFILE\n" "echo \" TIME \t $start_time\" >> $LOGFILE\n" "echo \"\" >> $LOGFILE\n" "\n" "# GENERATE CTAGS SOURCE\n" "cd $SRCDIR\n" "if [ $? -ne 0 ]; then\n" " exit 1\n" "fi\n" "ctags --c-types=f -R\n" "# remove the ctags stuff, to leave just the function lines\n" "sed -e '/[ ]*m$/d' -e '/TAG/d' <tags>functions\n" "# remove anything but the function names\n" "awk '{print $1 }' $SRCDIR/functions > $SRCDIR/functionNamesAll\n" "\n" "# CUSTOMIZATION delete everything, which is not prefixed with custom src prefixes\n" "#cat $SRCDIR/functionNamesAll | sed -ne '/^$SRC_PREFIX_.*/p' > $SRCDIR/functionNames\n" "#cat $SRCDIR/functionNamesAll | sed -ne '/^impl_.*/p' >> $SRCDIR/functionNames\n" "cat $SRCDIR/functionNamesAll >> $SRCDIR/functionNames\n" "\n" "# count the lines\n" "sourceFkt=\"$(cat functions | wc -l)\"\n" "echo \" count source\\t\" $sourceFkt >> $LOGFILE\n" "\n" "# GENERATE CTAGS TESTS\n" "cd $TESTDIR\n" "if [ $? 
-ne 0 ]; then\n" " exit 1\n" "fi\n" "ctags --c-types=f -R\n" "# remove the ctags stuff, to leave just the function lines\n" "sed -e '/[ ]*m$/d' -e '/TAG/d' <tags>functions\n" "# remove anything but the function names\n" "awk '{print $1 }' $TESTDIR/functions > $TESTDIR/functionNames\n" "\n" "# count the lines\n" "testFkt=\"$(cat functions | grep -i ^$TEST_PREFIX | wc -l)\"\n" "echo \" count tests\\t\" $testFkt >> $LOGFILE\n" "\n" "echo \"\nUNTESTED: \" >> $LOGFILE\n" "# Found functions:\n" "while read line;\n" "do\n" " grep -n '^'$TEST_PREFIX$line'$' $TESTDIR/functionNames > \\\n" " /dev/null || echo $line >> $LOGFILE\n" "done < $SRCDIR/functionNames\n" "\n" "if [ $sourceFkt != 0 ]; then\n" " echo \"............................................\" >> $LOGFILE\n" " echo \"COVERAGE: $sourceFkt $testFkt\" | \\\n" " awk '{ printf $1 \" %%.2f %%%% \\n\", $3/$2*100}' >> $LOGFILE\n" "fi\n" "\n" "cat $LOGFILE\n" "echo \"-------------------------------------------------------\"\n" "echo \"\"\n" "\n" "# cleanup remove the files we created\n" "rm $SRCDIR/tags\n" "rm $SRCDIR/functions\n" "rm $SRCDIR/functionNames\n" "rm $TESTDIR/tags\n" "rm $TESTDIR/functions\n" "rm $TESTDIR/functionNames\n", bash_header, file_name, project, path_source, path_tests, test_prefix, file_name, src_prefix, test_prefix, path_source, path_tests, path_logfile )) return NULL; return strdup(buffer); } /*----------------------------------------------------------------------------*/ char *testrun_generate_script_gcov( const char *project, const char *file_name, const char *path_logfile, const char *path_tests_exec, const char *path_tests_source, const char *exec_suffix, const char *src_suffix ){ if ( !project || !file_name || !path_logfile || !path_tests_exec || !path_tests_source) return NULL; size_t size = 5000; char buffer[size]; memset(buffer, 0, size); if (0 > snprintf(buffer, size, "%s" "#\n" "# File %s\n" "# Authors Markus Toepfer\n" "# Date 2018-02-09\n" "#\n" "# Project %s\n" "#\n" "# Description 
Run gcov based coverage tests on all test cases.\n" "#\n" "# Usage ./%s /path/to/project\n" "#\n" "# Dependencies bash, gcov\n" "#\n" "# Last changed 2018-07-11\n" "# ------------------------------------------------------------------------\n" "\n" "start_time=$(date \"+%%Y.%%m.%%d-%%H.%%M.%%S.%%N\")\n" "\n" "FOLDER_TEST_EXEC=\"%s\"\n" "FOLDER_TEST_SRC=\"%s\"\n" "FOLDER_LOGFILE=\"%s\"\n" "TEST_EXEC_SUFFIX=\"%s\"\n" "TEST_SRC_SUFFIX=\"%s\"\n" "" "\n" "# SET A LOGFILE\n" "LOGFILE=\"$FOLDER_LOGFILE/gcov.$start_time.log\"\n" "touch $LOGFILE\n" "chmod a+w $LOGFILE\n" "echo \" (log) $start_time\" > $LOGFILE\n" "\n" "echo \"-------------------------------------------------------\" >> $LOGFILE\n" "echo \" GCOV RUNNER\" >> $LOGFILE\n" "echo \"-------------------------------------------------------\" >> $LOGFILE\n" "\n" "for test in $FOLDER_TEST_EXEC/*$TEST_EXEC_SUFFIX; do\n" " $test\n" "done\n" "\n" "FILES=`ls $FOLDER_TEST_EXEC/ | grep $TEST_EXEC_SUFFIX | wc -l`\n" "if [ $? -ne 0 ]; then\n" " echo \"ERROR ... 
could not count files of $FOLDER_TEST_EXEC\"\n" " exit 1\n" "fi\n" "c=0\n" "\n" "if [ $FILES -eq 0 ]; then\n" " exit 0\n" "fi\n" "\n" "for i in $FOLDER_TEST_SRC/*$TEST_SRC_SUFFIX.c\n" "do\n" " # RUN GCOV\n" " echo $i\n" " gcov $i\n" "done\n" "\n" "# move coverage output to log folder\n" "mv *.gcov $FOLDER_LOGFILE\n" "exit 0\n", bash_header, file_name, project, file_name, path_tests_exec, path_tests_source, path_logfile, exec_suffix, src_suffix )) return NULL; return strdup(buffer); } /*----------------------------------------------------------------------------*/ char *testrun_generate_script_gprof( const char *project, const char *file_name, const char *path_logfile, const char *path_tests_exec, const char *exec_suffix ){ if ( !project || !file_name || !path_logfile || !path_tests_exec) return NULL; size_t size = 5000; char buffer[size]; memset(buffer, 0, size); if (0 > snprintf(buffer, size, "%s" "#\n" "# File %s\n" "# Authors Markus Toepfer\n" "# Date 2018-02-09\n" "#\n" "# Project %s\n" "#\n" "# Description Run gprof based analysis tests on all test cases.\n" "#\n" "# Usage ./%s /path/to/project\n" "#\n" "# Dependencies bash, gprof\n" "#\n" "# Last changed 2018-07-11\n" "# ------------------------------------------------------------------------\n" "\n" "start_time=$(date \"+%%Y.%%m.%%d-%%H.%%M.%%S.%%N\")\n" "\n" "FOLDER_TEST_EXEC=\"%s\"\n" "FOLDER_LOGFILE=\"%s\"\n" "TEST_EXEC_SUFFIX=\"%s\"\n" "" "\n" "# SET A LOGFILE\n" "LOGFILE=\"$FOLDER_LOGFILE/gprof.$start_time.log\"\n" "touch $LOGFILE\n" "chmod a+w $LOGFILE\n" "echo \" (log) $start_time\" > $LOGFILE\n" "\n" "echo \"-------------------------------------------------------\" >> $LOGFILE\n" "echo \" GPROF RUNNER\" >> $LOGFILE\n" "echo \"-------------------------------------------------------\" >> $LOGFILE\n" "\n" "# Execute the test once and profile the execution\n" "for test in $FOLDER_TEST_EXEC/*$TEST_EXEC_SUFFIX; do\n" " name=${test##*/}" " echo \"Profiling\" $name\n" " $test\n" " gprof $test gmon.out > 
$name.profile\n" "done\n" "\n" "# move profile to build/tests/logs\n" "mv *.profile $FOLDER_LOGFILE\n" "exit 0\n", bash_header, file_name, project, file_name, path_tests_exec, path_logfile, exec_suffix )) return NULL; return strdup(buffer); } /*----------------------------------------------------------------------------*/ char *testrun_generate_makefile_common( const char *project, const char *file_name, const char *path_bin, const char *path_build, const char *path_include, const char *path_source, const char *path_tests, const char *path_tools, const char *path_doxygen, const char *suffix_test_source, const char *suffix_test_exec, const char *script_unit_tests, const char *script_acceptance_tests, const char *script_coverage_tests, const char *script_loc, const char *script_gcov, const char *script_gprof, testrun_makefile_target target ){ if ( !project || !file_name || !path_bin || !path_build || !path_include || !path_source || !path_tests || !path_doxygen || !suffix_test_source || !suffix_test_exec || !script_unit_tests || !script_acceptance_tests || !script_coverage_tests || !script_loc || !script_gcov || !script_gprof) return NULL; char *target_all = NULL; char *target_install = NULL; char *target_uninstall = NULL; switch (target) { case LIB: target_all = "all_lib"; target_install = "install_lib"; target_uninstall = "uninstall_lib"; break; case EXEC: target_all = "all_exec"; target_install = "install_exec"; target_uninstall = "uninstall_exec"; break; case SERVICE: target_all = "all_service"; target_install = "install_service"; target_uninstall = "uninstall_service"; break; default: return NULL; } size_t size = 20000; char buffer[size]; memset(buffer, 0, size); if (0 > snprintf(buffer, size, "%s" "#\n" "# File %s\n" "# Authors Markus Toepfer\n" "# Date 2018-02-18\n" "#\n" "# Project %s\n" "#\n" "# Description Generic makefile for testrun based projects.\n" "#\n" "# Target of this makefile is an independent library\n" "# or executable to be installed at either 
PREFIX/lib\n" "# or PREFIX/bin.\n" "#\n" "# The TESTING part contains all required functionality\n" "# to use the testrun tools via a makefile. It may be\n" "# seen as a makefile integrated testrunner framework.\n" "#\n" "# in particular:\n" "#\n" "# \"make clean && make tested\"\n" "#\n" "# may be used to build all sources as well as tests from\n" "# scratch and perform an integrated testrun over all after\n" "# compilation.\n" "#\n" "# \"make gcov\"\n" "#\n" "# may be used to rebuild the whole project with gcov\n" "# coverage testing flag enabled.\n" "#\n" "# \"make gprof\"\n" "#\n" "# may be used to rebuild the whole project with gprof\n" "# profiling flags enabled.\n" "#\n" "# Following folder structure is required\n" "#\n" "# bin MUST be located at %s\n" "# build MUST be located at %s\n" "# inludes MUST be located at %s\n" "# sources MUST be located at %s\n" "# tests MUST be located at %s\n" "#\n" "# ALL TEST SCRIPTS MAY BE EXCHANGED WITH CUSTOM RUNNERS\n" "#\n" "# Usage SHOULD be used included by parent makefile\n" "#\n" "# NOTE aligned with tab width 4\n" "#\n" "# Dependencies testrun (makefile & service scripts), doxygen (if used)\n" "#\n" "# Last changed 2018-07-12\n" "# ------------------------------------------------------------------------\n" "\n" "# Switch on colors\n" "GCC_COLORS ?= 'gcc colors available, use them!'\n" "export GCC_COLORS\n" "\n" "# ----- Compiler flags -----------------------------------------------------\n" "\n" "CFLAGS\t\t\t= -Wall -Wextra -fPIC -Iinclude\n" "\n" "CFLAGS\t\t\t+= $(MODCFLAGS)\n" "LFLAGS\t\t\t+= $(MODLFLAGS)\n" "\n" "# ----- Project path calculation (if used included) ------------------------\n" "\n" "PROJECTPATH\t\t:= $(abspath $(dir $(PROJECTMK)))\n" "DIRNAME\t\t\t:= $(notdir $(patsubst %%/,%%,$(dir $(PROJECTMK))))\n" "\n" "# ----- Package config setup -----------------------------------------------\n" "\n" "LIBNAME\t\t\t:= lib$(DIRNAME)\n" "LIBNAMEPC\t\t:= $(LIBNAME).pc\n" "\n" "INCDIR\t\t\t:= 
$(PREFIX)/usr/local/include/$(DIRNAME)\n" "LIBDIR\t\t\t:= $(PREFIX)/usr/local/lib\n" "EXECDIR\t\t\t:= $(PREFIX)/usr/local/bin\n" "CONFDIR\t\t\t:= $(PREFIX)/etc/$(DIRNAME)\n" "SOCKDIR\t\t\t:= $(PREFIX)/etc/systemd/system\n" "\n" "# ----- TARGETS ------------------------------------------------------------\n" "\n" "INSTALL\t\t\t:= install\n" "\n" "EXECUTABLE\t\t= %s/$(DIRNAME)\n" "\n" "STATIC\t\t\t= %s/lib$(DIRNAME).a\n" "SHARED\t\t\t= $(patsubst %%.a,%%.so,$(STATIC))\n" "\n" "# Source and object files to compile\n" "HEADERS\t\t\t= $(wildcard %s/*.h)\n" "SOURCES\t\t\t= $(wildcard %s/**/*.c %s/*.c)\n" "OBJECTS\t\t\t= $(patsubst %%.c,%%.o,$(SOURCES))\n" "\n" "# Test sources and targets\n" "TESTS_SOURCES = $(wildcard %s/**/*%s.c %s/*%s.c)\n" "TESTS_TARGET = $(patsubst %s/%%.c, %s/tests/%%%s, $(TESTS_SOURCES))\n" "\n" "# GCOV support\n" "GCOV_FILES\t\t= $(patsubst %%.c,%%.gcno,$(SOURCES))\n" "GCOV_FILES\t\t+= $(patsubst %%.c,%%.gcov,$(SOURCES))\n" "GCOV_FILES\t\t+= $(patsubst %%.c,%%.gcda,$(SOURCES))\n" "GCOV_FILES\t\t+= $(patsubst %%.c,%%.gcno,$(TESTS_SOURCES))\n" "GCOV_FILES\t\t+= $(patsubst %%.c,%%.gcda,$(TESTS_SOURCES))\n" "\n" "ifdef USE_GCOV\n" "CFLAGS += -fprofile-arcs -ftest-coverage\n" "LFLAGS += -lgcov --coverage\n" "endif\n" "\n" "ifdef USE_GPROF\n" "CFLAGS += -pg\n" "endif\n" "\n" "# ----- TEST_SCRIPTS -------------------------------------------------------\n" "\n" "TEST_TOOLS_FOLDER\t\t=%s\n" "TEST_SCRIPT_UNIT\t\t= $(TEST_TOOLS_FOLDER)/%s\n" "TEST_SCRIPT_ACCEPTANCE\t= $(TEST_TOOLS_FOLDER)/%s\n" "TEST_SCRIPT_COVERAGE\t=$(TEST_TOOLS_FOLDER)/%s\n" "TEST_SCRIPT_LOC\t\t\t= $(TEST_TOOLS_FOLDER)/%s\n" "TEST_SCRIPT_GCOV\t\t= $(TEST_TOOLS_FOLDER)/%s\n" "TEST_SCRIPT_GPROF\t\t= $(TEST_TOOLS_FOLDER)/%s\n" "\n" "# ----- DEFAULT MAKE RULES -------------------------------------------------\n" "\n" "%%.o : %%.c $(HEADERS)\n" "\t@echo \" (CC) $@\"\n" "\t@$(CC) $(CFLAGS) -o $@ -c $< $(LIBS)\n" "\n" "%%%s.o : %%%s.c\n" "\t@echo \" (CC) $@\"\n" "\t@$(CC) $(CFLAGS) -o $@ -c $< 
$(LIBS)\n" "\n" "all:\t\t\t%s\n" "install:\t\t%s\n" "uninstall:\t\t%s\n" "\n" "all_lib:\t\tstart lib tests pkgconfig done\n" "all_exec:\t\tstart lib tests $(EXECUTABLE) done\n" "all_service:\tall_exec\n" "\n" "lib:\t\t\tbuild sources\n" "sources:\t\tbuild $(STATIC) $(SHARED)\n" "tests:\t\t\ttests-resources $(TESTS_TARGET)\n" "\n" "$(STATIC): $(OBJECTS)\n" "\t@echo \" (AR) $@ $(OBJECTS)\"\n" "\t@ar rcs $@ $(OBJECTS)\n" "\t@ranlib $@\n" "\n" "$(SHARED): $(STATIC) $(OBJECTS)\n" "\t@echo \" (CC) $@ $(OBJECTS)\"\n" "\t@$(CC) -shared -o $@ $(OBJECTS) $(LIBS) $(LFLAGS)\n" "\n" "$(EXECUTABLE): $(OBJECTS)\n" "\t@echo \" (CC) $@ $(OBJECTS)\"\n" "\t$(CC) -o $@ $(STATIC) $(LIBS) $(LFLAGS)\n" "\n" "# ----- BUILD & CLEANUP ----------------------------------------------------\n" "\n" "build:\n" "\t@mkdir -p %s\n" "\t@mkdir -p %s\n" "\t@mkdir -p %s/tests\n" "\t@mkdir -p %s/tests/unit\n" "\t@mkdir -p %s/tests/acceptance\n" "\t@mkdir -p %s/tests/log\n" "\t@echo \" (MK) directories for build\"\n" "\n" ".PHONY: clean\n" "clean:\n" "\t@echo \" (CLEAN) $(LIBNAME)\"\n" "\t@rm -rf %s %s %s/documentation $(OBJECTS) $(TESTS_OBJECTS) \\\n" "\t$(LIBNAMEPC) $(TESTS_TMP_FILES) $(GCOV_FILES) *.gcov *.profile *.pc *.out\n" "\n" "\n" "# ----- DOCUMENATION -------------------------------------------------------\n" "\n" "#NOTE requires doxygen.PHONY: documentation\n" "documentation:\n" "\tdoxygen %s/doxygen.config\n" "\n" "# ----- INFORMATION PRINTING -----------------------------------------------\n" "\n" "# print out a variable of the make file (e.g. 
\"make print-PROJECTPATH\")\n" ".PHONY: print\n" "print-%% : ; @echo $* = $($*)\n" "\n" ".PHONY: start\n" "start:\n" "\t@echo \"\\n (HINT) $(PROJECT) ==> running make\\n\"\n" "\n" ".PHONY: done\n" "done:\n" "\t@echo\n" "\t@echo \" (DONE) make $(PROJECT)\"\n" "\t@echo \" (HINT) with unit testing ==> 'make tested'\"\n" "\t@echo \" (HINT) perform installation ==> 'sudo make install\"\n" "\t@echo \" (HINT) generate documentation ==> 'make documentation\"\n" "\n" "# ----- TESTING ------------------------------------------------------------\n" "\n" "# ALL IN ONE CALL (compile source, test and run test)\n" "tested: all testrun done\n" "\n" "# copy test resources to build\n" "tests-resources:\n" "\t@echo \" (CP) tests/resources\"\n" "\t@cp -r %s/resources %s/tests\n" "\n" "%s/tests/acceptance/%%%s%s: %s/acceptance/%%%s.o\n" "\t@echo \" (CC) $(@)\"\n" "\t@$(CC) $(CFLAGS) $(LFLAGS) $^ -ldl $(STATIC) -Wl,-rpath=$(RPATH) -o $(@) $(LIBS)\n" "\n" "%s/tests/unit/%%%s%s: %s/unit/%%%s.o\n" "\t@echo \" (CC) $(@)\"\n" "\t@$(CC) $(CFLAGS) $(LFLAGS) $^ -ldl $(STATIC) -Wl,-rpath=$(RPATH) -o $(@) $(LIBS)\n" "\n" "# TESTRUN runners ----------------------------------------------------------\n" "\n" "# ACCEPTANCE TEST script invocation\n" ".PHONY: testrun-acceptance\n" "testrun-acceptance:\n" "\tsh $(TEST_SCRIPT_ACCEPTANCE)\n" "\n" "# UNIT TEST script invocation\n" ".PHONY: testrun-unit\n" "testrun-unit:\n" "\tsh $(TEST_SCRIPT_UNIT)\n" "\n" "# COVERAGE TEST script invocation\n" ".PHONY: testrun-coverage\n" "testrun-coverage:\n" "\tsh $(TEST_SCRIPT_COVERAGE) $(PROJECTPATH)\n" "\n" "# LOC TEST script invocation\n" ".PHONY: testrun-loc\n" "testrun-loc:\n" "\tsh $(TEST_SCRIPT_LOC) $(PROJECTPATH)\n" "\n" "# TESTRUN all scripts\n" ".PHONY: testrun\n" "testrun:\n" "\t@echo \" (HINT) $(PROJECT) \\t\\t\\t==> running tests\\n\"\n" "\tsh $(TEST_SCRIPT_UNIT)\n" "\tsh $(TEST_SCRIPT_ACCEPTANCE)\n" "\tsh $(TEST_SCRIPT_COVERAGE) $(PROJECTPATH)\n" "\tsh $(TEST_SCRIPT_LOC) $(PROJECTPATH)\n" "\n" "# TESTRUN 
gcov -------------------------------------------------------------\n" "\n" ".PHONY: testrun-gcov\n" "testrun-gcov: clean\n" "\tmake USE_GCOV=1 all\n" "\tsh $(TEST_SCRIPT_GCOV) $(PROJECTPATH)\n" "\n" "# TESTRUN gprof ------------------------------------------------------------\n" "\n" ".PHONY: testrun-gprof\n" "testrun-gprof: clean\n" "\tmake USE_GPROF=1 all\n" "\tsh $(TEST_SCRIPT_PROF) $(PROJECTPATH)\n" "\n" "# ----- PKGCONFIG LIBRARY BUILD --------------------------------------------\n" "\n" ".PHONY: pkgconfig\n" "pkgconfig:\n" "\t@echo 'prefix='$(PREFIX)'/usr/local/' > $(LIBNAMEPC)\n" "\t@echo 'exec_prefix=$${prefix}' >> $(LIBNAMEPC)\n" "\t@echo 'libdir=$${prefix}/lib' >> $(LIBNAMEPC)\n" "\t@echo 'includedir=$${prefix}/include' >> $(LIBNAMEPC)\n" "\t@echo '' >> $(LIBNAMEPC)\n" "\t@echo 'Name: '$(LIBNAME) >> $(LIBNAMEPC)\n" "\t@echo 'Description: '$(PROJECT_DESC) >> $(LIBNAMEPC)\n" "\t@echo 'Version: '$(VERSION) >> $(LIBNAMEPC)\n" "\t@echo 'URL: ' $(PROJECT_URL) >> $(LIBNAMEPC)\n" "\t@echo 'Libs: -L$${libdir} -l'$(DIRNAME) >> $(LIBNAMEPC)\n" "\t@echo 'Cflags: -I$${includedir}' >> $(LIBNAMEPC)\n" "\n" "# ----- INSTALLATION -------------------------------------------------------\n" "\n" "# Installation as a library ------------------------------------------------\n" "\n" ".PHONY: install_lib\n" "install_lib: $(SHARED) $(STATIC)\n" "\t@echo \" (OK) installed $(LIBNAME) to $(LIBDIR)\"\n" "\t@mkdir -p $(LIBDIR)/pkgconfig\n" "\t@mkdir -p $(INCDIR)\n" "\t@$(INSTALL) -m 0644 -t $(INCDIR) $(shell find include -name \"*.h\")\n" "\t@$(INSTALL) -m 0755 $(SHARED) $(LIBDIR)\n" "\t@$(INSTALL) -m 0755 $(STATIC) $(LIBDIR)\n" "\t@$(INSTALL) -m 0644 $(LIBNAMEPC) $(LIBDIR)/pkgconfig\n" "\t@ldconfig\n" "\n" ".PHONY: uninstall_lib\n" "uninstall_lib:\n" "\t@echo \" (OK) uninstalled $(LIBNAME) from $(LIBDIR)\"\n" "\t@rm -rf $(INCDIR)\n" "\t@rm -rf $(LIBDIR)/$(LIBNAME).a\n" "\t@rm -rf $(LIBDIR)/$(LIBNAME).so\n" "\t@rm -rf $(LIBDIR)/pkgconfig/$(LIBNAMEPC)\n" "\n" "# Installation as an 
executable --------------------------------------------\n" "\n" ".PHONY: install_exec\n" "install_exec: $(SHARED) $(STATIC)\n" "\t@echo \" (OK) installed $(DIRNAME) to $(EXECDIR)\"\n" "\t@$(INSTALL) -m 0755 bin/$(DIRNAME) $(EXECDIR)\n" "\n" ".PHONY: uninstall_exec\n" "uninstall_exec:\n" "\t@echo \" (OK) uninstalled $(DIRNAME) from $(EXECDIR)\"\n" "\t@rm -rf $(EXECDIR)/$(DIRNAME)\n" "\n" "# Installation as a service ------------------------------------------------\n" ".PHONY: install_service\n" "install_service: copy_service_files enable_service\n" "\t@echo \" (OK) installed $(DIRNAME) to $(EXECDIR)\"\n" "\n" ".PHONY: copy_service_files\n" "copy_service_files: $(EXECUTABLE) \n" "\t@echo \" (OK) copied service files\"\n" "\t@mkdir -p $(SOCKDIR)\n" "\t@$(INSTALL) -m 0755 bin/$(DIRNAME) $(EXECDIR)\n" "\t@$(INSTALL) -m 0755 -d $(SERVICE_DATA)/etc $(CONFDIR)\n" "\t@$(INSTALL) -m 0644 $(SERVICE_DATA)/*.service $(SOCKDIR)\n" "\t@$(INSTALL) -m 0644 $(SERVICE_DATA)/*.socket $(SOCKDIR)\n" "\n" ".PHONY: enable_service\n" "enable_service:\n" "\t@# IF INSTALLATION IS DONE UNPREFIXED TO /etc, the service will be enabled \n" "\t@ifndef ($(PREFIX)) \\\n" "\t\t@echo \" (OK) enable service\" \\\n" "\t\t$(shell systemctl enable $(DIRNAME).socket) \\\n" "\t\t$(shell systemctl start $(DIRNAME).socket) \\\n" "\t\t$(shell systemctl enable $(DIRNAME).service) \\\n" "\t\t$(shell systemctl start $(DIRNAME).service) \\\n" "\t\t$(shell systemctl daemon-reload) \\\n" "\t@endif\n" "\n" ".PHONY: delete_service_files\n" "delete_service_files: \n" "\t@echo \" (OK) delete service files\"\n" "\t@rm -rf $(EXECDIR)/$(DIRNAME)\n" "\t@rm -rf $(CONFDIR)\n" "\t@rm -rf $(SOCKDIR)/$(DIRNAME)*\n" "\n" ".PHONY: disable_service\n" "disable_service:\n" "\t@# IF INSTALLATION WAS DONE UNPREFIXED TO /etc, the service will be disabled \n" "\t@ifndef ($(PREFIX)) \\\n" "\t\t@echo \" (OK) disable service\" \\\n" "\t\t$(shell systemctl stop $(DIRNAME).service) \\\n" "\t\t$(shell systemctl disable $(DIRNAME).service) 
\\\n" "\t\t$(shell systemctl stop $(DIRNAME).socket) \\\n" "\t\t$(shell systemctl disable $(DIRNAME).socket) \\\n" "\t\t$(shell systemctl daemon-reload) \\\n" "\t@endif\n" "\n" ".PHONY: uninstall_service\n" "uninstall_service: disable_service delete_service_files\n" "\t@echo \" (OK) uninstalled $(DIRNAME) from $(EXECDIR)\"\n" , bash_header, file_name, project, path_bin, path_build, path_include, path_source, path_tests, path_bin, path_build, path_include, path_source, path_source, path_tests, suffix_test_source, path_tests, suffix_test_source, path_tests, path_build, suffix_test_exec, path_tools, script_unit_tests, script_acceptance_tests, script_coverage_tests, script_loc, script_gcov, script_gprof, suffix_test_source, suffix_test_source, target_all, target_install, target_uninstall, path_bin, path_build, path_build, path_build, path_build, path_build, path_bin, path_build, path_doxygen, path_doxygen, path_tests, path_build, path_build, suffix_test_source, suffix_test_exec, path_tests, suffix_test_source, path_build, suffix_test_source, suffix_test_exec, path_tests, suffix_test_source)) return NULL; return strdup(buffer); } /*----------------------------------------------------------------------------*/ char *testrun_generate_makefile( const char *project, const char *file_name, const char *version, const char *cflags, const char *project_url, const char *project_desc, const char *path_service, const char *makefile_common ){ if ( !project || !file_name || !version || !path_service || !makefile_common) return NULL; size_t size = 20000; char buffer[size]; memset(buffer, 0, size); if (0 > snprintf(buffer, size, "%s" "#\n" "# File %s\n" "# Authors Markus Toepfer\n" "# Authors ...\n" "# Date 2018-02-18\n" "#\n" "# Project %s\n" "#\n" "# Description This makefile defines project specific parameter.\n" "#\n" "# These parameter are:\n" "# (1) used compiler and special flags\n" "# (2) name and version\n" "# (3) installation prefix\n" "# (4) used libraries\n" "# (5) general 
makefiles used\n" "#\n" "# Usage make\n" "#\n" "# Dependencies make & compiler\n" "#\n" "# Last changed 2018-07-12\n" "# ------------------------------------------------------------------------\n" "\n" "CC = gcc\n" "\n" "PROJECT\t\t\t:= %s\n" "VERSION\t\t\t:= %s\n" "\n" "# project path recalculation (if used included from a parent make)\n" "PROJECTMK\t\t:= $(abspath $(lastword $(MAKEFILE_LIST)))\n" "\n" "# prefix for base directory for installation (default is /)\n" "#PREFIX\t\t\t:= some_path\n" "\n" "# include all pkgconfig files available at PREFIX\n" "export PKG_CONFIG_PATH = $(PREFIX)/usr/local/lib/pkgconfig\n" "\n" "# LIBS USED (uncommented example includes)\n" "# ... will allow to include libs installed under PREFIX\n" "#LIBS\t\t\t+= `pkg-config --cflags --libs libtestrun.info`\n" "#LIBS\t\t\t+= `pkg-config --libs libsystemd`\n" "#LIBS\t\t\t+= `pkg-config --libs uuid`\n" "#LIBS\t\t\t+= `pkg-config --libs openssl`\n" "\n" "# MODULE BASED CFLAGS (example)\n" "MODCFLAGS\t\t+= %s\n" "\n" "# EXTRA CFLAGS (example parallel or other GCC custom flags)\n" "#MODCFLAGS\t\t+= -fopenmp\n" "#MODCFLAGS\t\t+= -rdynamic\n" "\n" "# EXTRA LFLAGS (example)\n" "#MODLFLAGS\t\t+= -pthread\n" "\n" "# PKG_CONFIG_DATA (used during LIBRARY install)\n" "PROJECT_URL\t\t= \"%s\"\n" "PROJECT_DESC\t= \"%s\"\n" "\n" "# SERVICE_CONFIG_DATA (used during SERVICE install)\n" "SERVICE_DATA\t= \"%s\"\n" "\n" "# TMP FILE DEFINITION\n" "TESTS_TMP_FILES\t= $(wildcard /tmp/test_*)\n" "\n" "# INCLUDE BASE MAKEFILE\n" "include %s\n" , bash_header, file_name, project, project, version, cflags, project_url, project_desc, path_service, makefile_common )) return NULL; return strdup(buffer); } /*----------------------------------------------------------------------------*/ testrun_tools testrun_tools_default(){ struct testrun_tools tools = { .testrun_header = testrun_generate_header, .testrun_header_openmp = testrun_generate_header_openmp, .testrun_simple_tests = testrun_generate_script_simple_tests, 
.testrun_runner = testrun_generate_script_runner, .testrun_loc = testrun_generate_script_loc, .testrun_simple_coverage = testrun_generate_script_coverage, .testrun_gcov = testrun_generate_script_gcov, .testrun_gprof = testrun_generate_script_gprof, .makefile_configurable = testrun_generate_makefile, .makefile_common = testrun_generate_makefile_common, .gitignore = testrun_generate_gitignore, .readme = testrun_generate_readme, .doxygen = testrun_generate_doxygen, .service_file = testrun_generate_service_file, .socket_file = testrun_generate_socket_file }; return tools; } /*----------------------------------------------------------------------------*/ bool testrun_tools_validate(const testrun_tools *self){ if ( !self || !self->testrun_header || !self->testrun_header_openmp || !self->testrun_simple_tests || !self->testrun_runner || !self->testrun_loc || !self->testrun_simple_coverage || !self->testrun_gcov || !self->testrun_gprof || !self->makefile_configurable || !self->makefile_common || !self->gitignore || !self->readme || !self->doxygen || !self->service_file || !self->socket_file) return false; return true; }
Atlas.h
#pragma once #include <llassetgen/DistanceTransform.h> #include <llassetgen/Image.h> #include <llassetgen/Packing.h> namespace llassetgen { using ImageTransform = void (*)(Image&, Image&); namespace internal { template <class Iter> constexpr int checkImageIteratorType() { using IterTraits = typename std::iterator_traits<Iter>; using IterType = typename IterTraits::value_type; using IterCategory = typename IterTraits::iterator_category; static_assert(std::is_assignable<Image, IterType>::value, "Input elements must be assignable to Image"); static_assert(std::is_base_of<std::random_access_iterator_tag, IterCategory>::value, "Input iterator must be a RandomAccessIterator"); return 0; } } template <class ImageIter> Image fontAtlas(ImageIter imgBegin, ImageIter imgEnd, Packing packing, uint8_t bitDepth = 1) { using DiffType = typename std::iterator_traits<ImageIter>::difference_type; assert(std::distance(imgBegin, imgEnd) == static_cast<DiffType>(packing.rects.size())); Image atlas{packing.atlasSize.x, packing.atlasSize.y, bitDepth}; atlas.clear(); #pragma omp parallel for for (int i = 0; i < std::distance(imgBegin, imgEnd); i++) { auto& rect = packing.rects[i]; Image view = atlas.view(rect.position, rect.position + rect.size); view.copyDataFrom(imgBegin[i]); } return atlas; } /* * Every Image corresponds to a Rect from the Packing. If a Rect's size is smaller than its Image's * size, the Image will be downsampled in the returned atlas. The downsampling ratio is determined by * dividing the Image's size by its Rect's size. Only integer ratios are allowed: if the division * has a remainder, an error will occur. 
*/ template <class ImageIter> Image distanceFieldAtlas(ImageIter imgBegin, ImageIter imgEnd, Packing packing, ImageTransform distanceTransform, ImageTransform downSampling) { internal::checkImageIteratorType<ImageIter>(); using DiffType = typename std::iterator_traits<ImageIter>::difference_type; assert(std::distance(imgBegin, imgEnd) == static_cast<DiffType>(packing.rects.size())); Image atlas{packing.atlasSize.x, packing.atlasSize.y, DistanceTransform::bitDepth}; atlas.fillRect({0, 0}, atlas.getSize(), DistanceTransform::backgroundVal); const int max = std::distance(imgBegin, imgEnd); #pragma omp parallel for for (int i = 0; i < max; i++) { auto& imgInput = imgBegin[i]; Image distField{imgInput.getWidth(), imgInput.getHeight(), DistanceTransform::bitDepth}; distanceTransform(imgInput, distField); auto& rect = packing.rects[i]; Image output = atlas.view(rect.position, rect.position + rect.size); downSampling(output, distField); } return atlas; } }
ast-dump-openmp-distribute.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp distribute for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp distribute for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp distribute collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp distribute collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp distribute collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-distribute.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPDistributeDirective {{.*}} <line:4:1, col:23> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | 
|-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute.c:4:1) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPDistributeDirective {{.*}} <line:10:1, col:23> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt 
{{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute.c:10:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPDistributeDirective {{.*}} <line:17:1, col:35> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:24, col:34> // CHECK-NEXT: | | 
`-ConstantExpr {{.*}} <col:33> 'int' // CHECK-NEXT: | | |-value: Int 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:33> 'int' 1 // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' 
lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute.c:17:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPDistributeDirective {{.*}} <line:24:1, col:35> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:24, col:34> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:33> 'int' // CHECK-NEXT: | | |-value: Int 2 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:33> 'int' 2 // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | 
`-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute.c:24:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: 
|-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPDistributeDirective {{.*}} <line:31:1, col:35> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:24, col:34> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:33> 'int' // CHECK-NEXT: | |-value: Int 2 // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:33> 'int' 2 // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} 
<col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute.c:31:1) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:5> 
'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
quadrature_omp.c
#include "quadrature.h"
#include <math.h>
#include <omp.h>
#include <stdio.h>

/*
 * Approximate the integral of fp over [a, b] with the composite trapezoid
 * rule on N equally spaced nodes (N-1 subintervals of width h = (b-a)/(N-1)).
 *
 * fp : integrand, evaluated at each node
 * a,b: integration limits
 * N  : number of nodes; N < 2 cannot form a trapezoid and returns 0.0
 *
 * Fixes over the previous version: the endpoint values f_a/f_b were assigned
 * to shared variables from inside the parallel loop, which (a) left f_b
 * uninitialized when N == 1 (undefined behavior, plus a division by zero in
 * h), and (b) relied on shared writes inside the parallel region.  The
 * endpoints are now evaluated serially and only the interior nodes are
 * summed in the parallel reduction; for N >= 2 the result is algebraically
 * identical: h*(sum_interior) + 0.5*h*(f_a + f_b).
 */
double trapezoid(double (*fp)(double), double a, double b, int N)
{
    /* Guard degenerate input: fewer than two nodes means no subintervals. */
    if (N < 2)
        return 0.0;

    double h = (b-a)/(N-1);

    /* Endpoint contributions (weight 1/2 each), computed outside the
     * parallel region.  f_b uses a + h*(N-1) rather than b to reproduce the
     * original node placement bit-for-bit. */
    double f_a = (*fp)(a);
    double f_b = (*fp)(a + h*(N-1));

    /* Interior nodes (weight 1), summed with an OpenMP reduction. */
    double sum = 0.0;
    #pragma omp parallel for reduction(+: sum)
    for (int i = 1; i < N-1; ++i)
        sum += (*fp)(a + h*i);

    return h*(sum + 0.5*(f_a + f_b));
}

/*
 * Print a convergence table for the trapezoid rule: for each node count in
 * nvals[0..nrows-1], print n, the trapezoid estimate, its absolute error
 * against int_true, and the ratio of the previous error to the current one
 * (the first row's ratio is 0 since there is no previous error; a zero
 * current error prints inf).
 */
void error_table(double (*fp)(double), double a, double b, int nrows, int nvals[], double int_true)
{
    double last_error = 0.0, error, int_trap, ratio;
    printf("%8s %22s %13s %13s\n", "n", "trapezoid", "error", "ratio");
    for (int i = 0; i < nrows; ++i)
    {
        int_trap = trapezoid(fp, a, b, nvals[i]);
        error = fabs(int_trap - int_true);
        ratio = last_error / error;
        last_error = error;  // for next n
        printf("%8d %22.14e %13.3e %13.3e\n", nvals[i], int_trap, error, ratio);
    }
}
GB_convert_bitmap_worker.c
//------------------------------------------------------------------------------ // GB_convert_bitmap_worker: construct triplets or CSC/CSR from bitmap //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // TODO allow this function to do typecasting. Create 169 different versions // for all 13x13 versions. Use this as part of Method 24, C=A assignment. #include "GB.h" #include "GB_partition.h" #define GB_FREE_ALL \ { \ GB_FREE (W) ; \ } GrB_Info GB_convert_bitmap_worker // extract CSC/CSR or triplets from bitmap ( // outputs: int64_t *GB_RESTRICT Ap, // vector pointers for CSC/CSR form int64_t *GB_RESTRICT Ai, // indices for CSC/CSR or triplet form int64_t *GB_RESTRICT Aj, // vector indices for triplet form GB_void *GB_RESTRICT Ax_new, // values for CSC/CSR or triplet form int64_t *anvec_nonempty, // # of non-empty vectors // inputs: not modified const GrB_Matrix A, // matrix to extract; not modified GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (GB_IS_BITMAP (A)) ; ASSERT (Ap != NULL) ; // must be provided on input, size avdim+1 int64_t *GB_RESTRICT W = NULL ; const int64_t avdim = A->vdim ; const int64_t avlen = A->vlen ; const size_t asize = A->type->size ; //-------------------------------------------------------------------------- // count the entries in each vector //-------------------------------------------------------------------------- const int8_t *GB_RESTRICT Ab = A->b ; GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (avlen*avdim, chunk, nthreads_max) ; bool by_vector = (nthreads <= avdim) ; if (by_vector) { 
//---------------------------------------------------------------------- // compute all vectors in parallel (no workspace) //---------------------------------------------------------------------- int64_t j ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (j = 0 ; j < avdim ; j++) { // ajnz = nnz (A (:,j)) int64_t ajnz = 0 ; int64_t pA_start = j * avlen ; for (int64_t i = 0 ; i < avlen ; i++) { // see if A(i,j) is present in the bitmap int64_t p = i + pA_start ; ajnz += Ab [p] ; ASSERT (Ab [p] == 0 || Ab [p] == 1) ; } Ap [j] = ajnz ; } } else { //---------------------------------------------------------------------- // compute blocks of rows in parallel //---------------------------------------------------------------------- // allocate one row of W per thread, each row of length avdim W = GB_MALLOC (nthreads * avdim, int64_t) ; if (W == NULL) { GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (taskid = 0 ; taskid < nthreads ; taskid++) { int64_t *GB_RESTRICT Wtask = W + taskid * avdim ; int64_t istart, iend ; GB_PARTITION (istart, iend, avlen, taskid, nthreads) ; for (int64_t j = 0 ; j < avdim ; j++) { // ajnz = nnz (A (istart:iend-1,j)) int64_t ajnz = 0 ; int64_t pA_start = j * avlen ; for (int64_t i = istart ; i < iend ; i++) { // see if A(i,j) is present in the bitmap int64_t p = i + pA_start ; ajnz += Ab [p] ; ASSERT (Ab [p] == 0 || Ab [p] == 1) ; } Wtask [j] = ajnz ; } } // cumulative sum to compute nnz(A(:,j)) for each vector j int64_t j ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (j = 0 ; j < avdim ; j++) { int64_t ajnz = 0 ; for (int taskid = 0 ; taskid < nthreads ; taskid++) { int64_t *GB_RESTRICT Wtask = W + taskid * avdim ; int64_t c = Wtask [j] ; Wtask [j] = ajnz ; ajnz += c ; } Ap [j] = ajnz ; } } //-------------------------------------------------------------------------- // cumulative sum of Ap 
//-------------------------------------------------------------------------- int nth = GB_nthreads (avdim, chunk, nthreads_max) ; GB_cumsum (Ap, avdim, anvec_nonempty, nth) ; int64_t anz = Ap [avdim] ; ASSERT (anz == A->nvals) ; //-------------------------------------------------------------------------- // gather the pattern and values from the bitmap //-------------------------------------------------------------------------- // TODO: add type-specific versions for built-in types const GB_void *GB_RESTRICT Ax = A->x ; if (by_vector) { //---------------------------------------------------------------------- // construct all vectors in parallel (no workspace) //---------------------------------------------------------------------- int64_t j ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (j = 0 ; j < avdim ; j++) { // gather from the bitmap into the new A (:,j) int64_t pnew = Ap [j] ; int64_t pA_start = j * avlen ; for (int64_t i = 0 ; i < avlen ; i++) { int64_t p = i + pA_start ; if (Ab [p]) { // A(i,j) is in the bitmap if (Ai != NULL) Ai [pnew] = i ; if (Aj != NULL) Aj [pnew] = j ; if (Ax_new != NULL) { // Ax_new [pnew] = Ax [p]) memcpy (Ax_new +(pnew)*asize, Ax +(p)*asize, asize) ; } pnew++ ; } } ASSERT (pnew == Ap [j+1]) ; } } else { //---------------------------------------------------------------------- // compute blocks of rows in parallel //---------------------------------------------------------------------- int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (taskid = 0 ; taskid < nthreads ; taskid++) { int64_t *GB_RESTRICT Wtask = W + taskid * avdim ; int64_t istart, iend ; GB_PARTITION (istart, iend, avlen, taskid, nthreads) ; for (int64_t j = 0 ; j < avdim ; j++) { // gather from the bitmap into the new A (:,j) int64_t pnew = Ap [j] + Wtask [j] ; int64_t pA_start = j * avlen ; for (int64_t i = istart ; i < iend ; i++) { // see if A(i,j) is present in the bitmap int64_t p = i + pA_start ; if (Ab [p]) { 
// A(i,j) is in the bitmap if (Ai != NULL) Ai [pnew] = i ; if (Aj != NULL) Aj [pnew] = j ; if (Ax_new != NULL) { // Ax_new [pnew] = Ax [p] ; memcpy (Ax_new +(pnew)*asize, Ax +(p)*asize, asize); } pnew++ ; } } } } } //-------------------------------------------------------------------------- // free workspace return result //-------------------------------------------------------------------------- GB_FREE (W) ; return (GrB_SUCCESS) ; }
main.c
/* * spike.c * Spike * * Created by Ben Evans on 19/06/2008. * Copyright 2008 University of Oxford. All rights reserved. * */ #include <stdio.h> #include <time.h> #include <stdlib.h> #include <string.h> #include <stdbool.h> #include <unistd.h> #ifdef SERIAL #undef _OPENMP #endif #ifdef _OPENMP #include <omp.h> #endif #include <pwd.h> #include <dlfcn.h> #include "utils.h" #include "globals.h" #include "parameters.h" #include "read_parameters.h" extern int spike(PARAMS * mp); char * RESDIR = RESULTSDIRECTORY; char * DPFILE = DEFAULTPFILE; char * MPFILE = OUTPUTPFILE; char * IMGDIR = IMGDIRECTORY; char * IPFILE = IMGPARAMFILE; char STFILE[BUFSIZ] = ""; // = STIMULIFILE; //char * STFILE = NULL; char PPSTFILE[BUFSIZ] = ""; #define GSLDIR "/opt/local/lib/" // Unnecessary? See checks below // otool -L Spike // to test which dynamic libraries are being used // http://discussions.apple.com/thread.jspa?threadID=1741520 // http://www.cprogramming.com/tutorial/shared-libraries-linux-gcc.html // Apple's malloc debugging library: libgmalloc // To use: export DYLD_INSERT_LIBRARIES=/usr/lib/libgmalloc.dylib PARAMS * mp; unsigned long int seed = 0; gsl_rng * mSeed = NULL; gsl_rng ** states = NULL; int nThreads = 1; char *pFile = NULL; int main (int argc, const char * argv[]) { //int set_error = 0; bool rerun = false; bool compress = true; bool seedFlag = false; int hours = 0; int mins = 0; char *imageArchive = NULL; //char *sfile = NULL; char *cpfile = "CLIparams.m"; FILE * cli_FP = NULL; FILE * pipeFP = NULL; char syscmd[BUFSIZ]; // stdio.h : 1024 char dlver[BUFSIZ]; char *bptr = NULL; int th = 0; float proportion = 1.0; #ifdef _OPENMP bool dynamic = false; #endif bool genNewSeed = false; bool p_flag = false; // Parameter (CLI) flag bool pf_flag = false; // Parameter file flag bool ia_flag = false; // Image archive flag int pINcount = 0; int dcount = 0; // Default parameter count int fcount = 0; // File parameter count int pcount = 0; // Parameter count int ocount = 0; // 
Parameters out count int err = 0; int slen = 0; char * error = NULL; void * dylib = NULL; bool recFlag = false; char * recList = NULL; bool skip_arg = false; //double wtime, begin; char timeStr[FNAMEBUFF]; int c, result; struct tm *ts; char hostname[FNAMEBUFF]; char schedule[BUFSIZ]; strncpy(schedule, "<default>", BUFSIZ); printf("--------------------------------------------------------------------------------\n"); time_t now = time(NULL); ts = localtime(&now); strftime(timeStr, FNAMEBUFF, "%a %d/%b/%Y %H:%M:%S", ts); err = gethostname(hostname, FNAMEBUFF); //char *user = getenv("USER"); struct passwd *userinfo = getpwuid(geteuid()); char *user = userinfo->pw_name; if (user && err != -1) printf("[%s] : Program started by %s@%s\n", timeStr, user, hostname); char cwd[BUFSIZ]; if (getcwd(cwd, sizeof(cwd)) != NULL) fprintf(stdout, "DIR: %s\n", cwd); // Print directory else perror("getcwd() error"); // Move this section to a seperate header e.g. compiler.h #ifdef _OPENMP #define OMP "T" #else #define OMP "F" #endif #ifdef __GNUC__ // N.B. __GNUC__ is for any compiler implementing GNU compiler extensions, hence is defined for clang and llvm-gcc #ifndef __has_feature #define __has_feature(x) 0 #endif #ifdef __llvm__ // Using LLVM backend // http://clang.llvm.org/docs/LanguageExtensions.html //printf("%d\n",__COUNTER__); #ifdef __clang__ // Using Clang-LLVM // For a list of builtin defines type: clang -x c /dev/null -dM -E printf("Compiler: Clang-LLVM %s\n", __clang_version__); #else // Using GCC-LLVM printf("Compiler: GCC-LLVM %s\n", __VERSION__); #endif // Time of last modification of current source file... printf("Compiled on: %s | Optimization: %d | Debug: %d | OpenMP: %s\n", \ __TIMESTAMP__, __OPTIMIZE__, DEBUG, OMP); #if __has_feature(c_static_assert) // Working? Relevent? printf("Includes support for compile-time assertions\n"); #else fprintf(stderr, "*** Warning: assert() disabled in parallel regions! 
***\n"); #endif #else // Using GCC printf("Compiler: %s | Optimization: %d | Debug: %d | OpenMP: %s\n", \ __VERSION__, __OPTIMIZE__, DEBUG, OMP); printf("Source modified on: %s\n",__TIMESTAMP__); printf("Compiled on: %s at %s\n", __DATE__, __TIME__); #endif //#endif // TODO Check this is unnecessary #ifdef NDEBUG fprintf(stderr, "*** Warning: Executing without error checking! ***\n"); #endif if (strcmp(user, "nobody")==0) SIM.Xgrid = true; else if (getenv("OMP_SCHEDULE")) // not NULL string strncpy(schedule, getenv("OMP_SCHEDULE"), BUFSIZ); char * rsfile = RSFILE; printf("--------------------------------------------------------------------------------\n"); printf("Checking for \"%s\" in current directory... \t\t\t [%s]\n",DPFILE,\ (file_exists(DPFILE))?"OK":"NO"); printf("Checking for \"%s\" in current directory... \t\t [%s]\n",rsfile,\ (file_exists(rsfile))?"OK":"NO"); // Check for GSL dylib = dlopen(GSLDIR"libgsl.dylib",RTLD_NOW); printf("Checking %s for GSL dyamic libraries... \t\t\t [%s]\n",GSLDIR,(dylib)?"OK":"NO"); if ((error = dlerror()) != NULL || !dylib) exit_error("main: libgsl.dylib check", error); else // dylib != NULL dlclose(dylib); // Check for System libraries dylib = dlopen("libSystem.dylib", RTLD_NOW); printf("Checking for System dynamic libraries... \t\t\t\t [%s]\n",(dylib)?"OK":"NO"); if ((error = dlerror()) != NULL || !dylib) exit_error("main: libSystem.dylib check", error); else // dylib != NULL dlclose(dylib); // Runtime OpenMP check using int omp_in_parallel(void); #ifdef _OPENMP #pragma omp parallel { #pragma omp master//single { printf("Checking for OpenMP runtime parallelism... 
\t\t\t\t [%s]\n",\ omp_in_parallel()?"OK":"NO"); } } #endif printf("--------------------------------------------------------------------------------\n"); char exec[BUFSIZ]; strncpy(exec, argv[0], sizeof(exec)-1); if (argc==1) { printf("%s usage:\n",exec); //argv[0]); printf("-c[lean]\t: Clean all dat and tbz files (including image archives!)\n"); printf("-f <filename>\t: Pass parameter filename\n"); printf("-r[erun]\t: Rerun simulation with the random seed in %s\n",rsfile); printf("-g[enerate]\t: Generate new random seed in %s and exit\n",rsfile); printf("-s <seed>\t: Explicitly pass random seed [0, (2^32)-1]\n"); printf("-k <record list>: Pass list of neurons to be recorded\n"); printf("-d[ynamic]\t: Set number of threads to be dynamic\n"); printf("-m <proportion>\t: Set number of threads to a proportion of cores [0.0, 1.0]\n"); printf("-t <threads>\t: Explicitly set the number of threads to use\n"); printf("-p <parameter>\t: Pass a parameter string <name>=<value>\n"); printf("--<parameter>\t: Pass a parameter string <name>=<value>\n"); //printf("-i[mage] <directory>\t: Pass directory of filtered images [***incomplete***]\n"); printf("-j <images>.tbz\t: Pass compressed image archive\n"); printf("-u[ncompressed]\t: Prevent data compression\n"); printf("-x[grid]\t: Set as an Xgrid simulation i.e. 
print progress information\n"); printf("================================================================================\n"); return 0; } #ifdef __APPLE__ //__unix__ // Output command passed to cmd.sh int a=0; cli_FP = myfopen("cmd.sh", "w"); fprintf(cli_FP, "#!/bin/bash\n"); for (a=0; a<argc; a++) fprintf(cli_FP, "%s ",argv[a]); fprintf(cli_FP, "\n"); fclose(cli_FP); system("chmod 755 cmd.sh"); #endif while (--argc > 0 && (*++argv)[0] == '-') { //skip_arg = 0; while (!skip_arg && (c = *++argv[0])) { switch (c) { case 'c': // Clean directory of .dat and .tbz files system("rm *.dat *.tbz"); break; case 'f': // Parameter file name pf_flag = true; pFile = myalloc(strlen(*++argv)+1); //sizeof(char)==1 guaranteed strcpy(pFile, *argv); skip_arg = true; argc--; break; case 'r': // Rerun with last seed rerun = true; //RERUN = 1; break; case 'g': // Generate a new random seed and exit genNewSeed = true; break; case 's': // Explicitly pass random seed (takes precedence over -r) seed = atol(*++argv); seedFlag = true; skip_arg = true; argc--; break; case 'k': // Read in list of neurons fprintf(stderr, "*** -k: Specifying neurons for recording is not yet implemented! ***\n"); //int * recordSet = NULL; recFlag = true; slen = strlen(*++argv); recList = myalloc(slen+1); //char * tstr = NULL; //int count = 0; strncpy(recList, *argv, slen); recList[slen] = '\0'; // NULL terminate last byte //strtok(list, ";"); //while (tstr && (*trim(tstr) != ('[' || ']')) && (tstr != '\0')) // count = parseIntVector(list, &recordSet); skip_arg = true; argc--; break; case 'd': // May reduce num_thd depending on system load #ifdef _OPENMP dynamic = true; omp_set_dynamic(dynamic); // Can not be used with RNG // if nCores > 4 // if nThreads > nCores-1 -> set nThreads = nCores - 2... #else fprintf(stderr, "*** -d: OpenMP disabled! 
***\n"); #endif break; case 'm': // Set the proportion of threads from the CLI [0.0, 1.0] #ifdef _OPENMP proportion = atof(*++argv); //#ifndef __llvm__ assert(0.0 < proportion && proportion <= 1.0); //#endif nThreads = round(omp_get_num_procs()*proportion); nThreads = (nThreads>1) ? nThreads : 1; omp_set_num_threads(nThreads); #else fprintf(stderr, "*** -m %f: OpenMP disabled! ***\n",proportion); #endif skip_arg = true; argc--; break; case 't': // Set the number of threads from the CLI #ifdef _OPENMP nThreads = atoi(*++argv); /*if (nThreads >= omp_get_num_procs()) omp_set_dynamic(true); else omp_set_num_threads(nThreads);*/ omp_set_num_threads(nThreads); if (nThreads >= omp_get_num_procs()) printf("Warning: nThreads (%d) >= nProcessors (%d)!\n",\ nThreads, omp_get_num_procs()); #else fprintf(stderr, "-t %d: OpenMP disabled\n",nThreads); #endif skip_arg = true; argc--; break; case 'p': // Code to pass a parameter string e.g. "param=0" if (!p_flag) { cli_FP = myfopen(cpfile, "w"); p_flag = true; } fprintf(cli_FP, "%s;\n", *++argv); skip_arg = true; argc--; pINcount++; break; case '-': // Equivalent to '-p ' but combines the arguments if (!p_flag) { cli_FP = myfopen(cpfile, "w"); p_flag = true; } fprintf(cli_FP, "%s;\n", ++argv[0]); // Advance to next char address skip_arg = true; //argc--; pINcount++; break; case 'i': // Pass Image directory break; case 'j': // Alternatively pass compressed tar (cjvf) of images ia_flag = true; slen = strlen(*++argv); imageArchive = myalloc(slen+1); strncpy(imageArchive, *argv, slen); imageArchive[slen] = '\0'; // NULL terminate last byte skip_arg = true; argc--; break; case 'u': // Keep data uncompressed compress = false; //printf("Warning: tbz archives should be removed to prevent analysis of them!\n"); break; case 'x': // Xgrid simulation SIM.Xgrid = true; break; default: printf("Illegal arguement: %c\n", c); argc = 0; break; } if (skip_arg) { skip_arg = false; break; } } } #ifdef _OPENMP #pragma omp parallel //private (th_id) { 
//th_id = omp_get_thread_num(); nThreads = omp_get_num_threads(); //num_thd #pragma omp single { printf("OMP: (%d/%d)\t{OMP_DYNAMIC=%s, OMP_NESTED=%s, OMP_SCHEDULE=%s}\n", \ nThreads, omp_get_num_procs(), \ (omp_get_dynamic() ? "TRUE" : "FALSE"), \ (omp_get_nested() ? "TRUE" : "FALSE"), \ (schedule)); } } #else nThreads = 1; printf("Executing in serial.\n"); #endif FILE * randSeedFP; char * sString, buffer[BUFSIZ]; unsigned long long seedMod = pow(2, 32); // 32-bit unsigned seeds (max value = pow(2, 32)-1) // printf("This program is compiled with GSL version %s.\n", GSL_VERSION); //#if DEBUG > 1 // Print GSL verison and location // system("gsl-config --prefix --version"); //#endif //if (!SIM.Xgrid) //{ // Print GSL verison and location if ((pipeFP = popen("/opt/local/bin/gsl-config --prefix --version", "r"))) { fgets(syscmd, sizeof(syscmd)-1, pipeFP); fgets(dlver, sizeof(dlver)-1, pipeFP); } pclose(pipeFP); if ((bptr = strpbrk(syscmd, "\r\n"))) //strstr(syscmd, '\n') *bptr = '\0'; printf("GSL: Compiled with v%s, found dynamic libraries v%4.2f at: %s\n", \ GSL_VERSION,atof(dlver),syscmd); // if atof(dlver) < GSL_MIN //} // Initialise random seed const gsl_rng_type * T = gsl_rng_default; // Set RNG type gsl_rng_env_setup(); // http://www.gnu.org/software/gsl/manual/html_node/Random-number-environment-variables.html mSeed = gsl_rng_alloc(T); // Used for serial sections with randomness if (genNewSeed) // Generate a new random seed file and exit { seed = (unsigned long) time((time_t *) NULL); seed %= seedMod; randSeedFP = myfopen(rsfile, "w"); fprintf(randSeedFP, "mSeed: \t%ld\n", seed); fclose(randSeedFP); printf("New seed generated in %s: %ld (%s) <GSL v%s>\n", rsfile, seed, gsl_rng_name(mSeed), GSL_VERSION); // Generator type not strictly necessary here return 0; } if (!seedFlag) { if (rerun) // Also rerun with parameters.m? 
{ randSeedFP = myfopen(rsfile, "r"); if ((sString = fgets(buffer, sizeof(buffer), randSeedFP)) != NULL) //while seed = atol(strrchr(sString,':')+1); //ans[count++] fclose(randSeedFP); printf("Rerunning simulation with %s: %ld (%s) <GSL v%s>\n", rsfile, seed, gsl_rng_name(mSeed), GSL_VERSION); } else { seed = (unsigned long) time((time_t *) NULL); seed %= seedMod; fprintf(stderr, "*** Warning: Creating new seed in %s: %ld (%s) <GSL v%s> ***\n", rsfile, seed, gsl_rng_name(mSeed), GSL_VERSION); randSeedFP = myfopen(rsfile, "w"); fprintf(randSeedFP, "mSeed: \t%ld\n", seed); fclose(randSeedFP); } } gsl_rng_set(mSeed, seed); //gsl_rng_set(mSeed, -idum); // Allocate and initialise model parameters structure mp = myalloc(sizeof(*mp)); // Place in getParameters with default init? mp->initialised = false; mp->imgList = NULL; mp->LvExcit = mp->LvInhib = mp->LpEfE = mp->LpElE = mp->LpEI = mp->LpIE = mp->LpII = 0; mp->vExcit = mp->vInhib = mp->vScales = mp->vOrients = mp->vPhases = NULL; mp->pCnxEfE = mp->pCnxElE = mp->pCnxIE = mp->pCnxEI = mp->pCnxII = NULL; mp->layDim = NULL; mp->vSquare = mp->vRecords = NULL; mp->rInhib = 0.0; int syserr = 0; if (ia_flag) //assert(mp->useFilteredImages); { FILEPARTS * fp = myalloc(sizeof(*fp)); getFileParts(imageArchive, fp); slen = strlen(fp->fname); mp->imgDir = myalloc(slen+1); strncpy(mp->imgDir, fp->fname, slen); mp->imgDir[slen] = '\0'; // NULL terminate last byte assert(file_exists(imageArchive)); if (snprintf(syscmd, BUFSIZ, "mkdir %s", mp->imgDir) >= BUFSIZ) fprintf(stderr, "Warning! Undersized buffer: %s", syscmd); if((syserr = system(syscmd))==0) { printf("Now extracting %s...\t", imageArchive); if(snprintf(syscmd, BUFSIZ, "tar -xf %s -C %s/",imageArchive, mp->imgDir) >= BUFSIZ) fprintf(stderr, "*** Warning! 
Undersized buffer: %s ***", syscmd); syserr = system(syscmd); if (syserr) EE("Error extracting image archive"); else printf("Images successfully extracted to %s\n", mp->imgDir); } } SIM.minTau = BIG; // Read in parameters from .m file printf("I/O: Processing parameters: \"%s\"", !pFile ? DPFILE : pFile); if (p_flag) fclose(cli_FP); dcount = read_parameters(mp, DPFILE); fcount = (pFile != NULL) ? read_parameters(mp, pFile) : 0; pcount = (p_flag) ? read_parameters(mp, cpfile) : 0; if (!mp->useFilteredImages) assert(pcount == pINcount); //printf(" {%d,%d,%d}\tParsing complete!\n", dcount, fcount, pcount); // Print parameters to MPFILE (parameters.m) ocount = printParameters(mp, MPFILE); // Variables to read into Matlab //printf("%d parameters written to %s\n", pcount, MPFILE); printf(" {%d,%d,%d} --> \"%s\" {%d} Done!\n", dcount, fcount, pcount, MPFILE, ocount); // Create a random seed for each thread to ensure thread safety if (mp->noise) // Could add a state to every neuron to achieve same results with different threads { #ifdef _OPENMP #pragma omp parallel { #pragma omp single { omp_set_dynamic(false); // Do not adjust number of threads according to system load } } #endif states = myalloc(nThreads * sizeof(**states)); for (th=0; th<nThreads; th++) { states[th] = gsl_rng_alloc(T); gsl_rng_set(states[th], seed+th+1); } } if (recFlag) // Make recordSet global, prevent random choice in init_network and free at the end { /*int ** recordSet = myalloc(mp->nLayers * sizeof(*recordSet)); char * tstr = NULL; int count = 0; strtok(list, ";"); while (tstr && (*trim(tstr) != ']') && (tstr != '\0')) for (l=0; l<mp->nLayers; l++) { mp->vRecords[l] = parseIntVector(list, &recordSet[l]); tstr = strtok(NULL, ";"); }*/ myfree(recList); } // Print minimum tau and DT to nearest microsecond printf("TAU: Smallest time constant = %.3f ms | DT = %.3f ms\n", SIM.minTau*1000, mp->DT*1000); if (mp->DT >= 2*SIM.minTau) // CHECK THIS fprintf(stderr, "*** Warning: Forward Euler stability 
condition violated! ***\n"); assert(mp->DT <= 0.001); // Timesteps must be no larger than 1 ms or mp->TSperMS==0! // Display dynamic libraries: otool -L ~/bin/SpikeNet/Debug/Spike #ifdef _OPENMP // Use omp function omp_get_wtime //double begin = omp_get_wtime(); SIM.start = omp_get_wtime(); SIM.elapsed = 0.0; #else time_t start = time(NULL); #endif if (!SIM.Xgrid && !mp->loadWeights) // Remove *.dat and *.tbz { system("rm *.dat"); //system("rm *.dat *.tbz"); if (ia_flag) { if(snprintf(syscmd, BUFSIZ, "find *.tbz ! -name %s -delete",imageArchive) >= BUFSIZ) fprintf(stderr, "*** Warning! Undersized buffer: %s ***", syscmd); if (system(syscmd)) // Delete *.tbz except image archive printf("Archive files successfully cleaned!\n"); else EE("Error cleaning archive files!"); //exit_error("main.c", "Error cleaning archive files!\n"); } else system("rm *.tbz"); // Delete *.tbz } if (mp->loadWeights) { // Pass an archive with all relevant dat files with CLI flag e.g. network.tbz const char * suffix = ""; char fname[FNAMEBUFF]; slen = snprintf(fname, FNAMEBUFF, "L0affNeuronsElE%s.dat", suffix); assert(slen < FNAMEBUFF); if (!file_exists(fname)) { if (file_exists("connectivity.tbz")) system("tar -xvf connectivity.tbz"); else EE("No connectivity files to load"); //exit_error("main", "No connectivity files to load"); } if (mp->nLayers > 1) { slen = snprintf(fname, FNAMEBUFF, "L1affNeuronsEfE%s.dat", suffix); assert(slen < FNAMEBUFF); if (!file_exists(fname)) { if (file_exists("postTraining.tbz")) system("tar -xvf postTraining.tbz"); else EE("No weights files to load"); //exit_error("main", "No weights files to load"); } } } /***** RUN SIMULATION *****/ result = spike(mp); /**************************/ // Compress data files for crash-free xgrid! 
'-j' Uses bzip (*.tbz equivalent to *.tar.bz2) // Append files to fileList and call system(syscmd); once and keep fileList //snprintf(syscmd, BUFSIZ, "tar -cjvf %s.tbz %s > fileList","connectivity","*affNeurons.dat"); /*if (!SIM.Xgrid) // /sbin/md5 system("md5 *.dat > datHashs.txt");*/ system("shasum *.dat > datHashs.txt"); // /usr/bin/shasum /*snprintf(syscmd, BUFSIZ, "xargs rm < fileList");*/ //--remove-files (remove files after adding them to the archive) : only 10.5 // Check that system() returned 0 (no errors) Bash: echo $? #pragma omp parallel sections private(syserr) // Experimental! { #pragma omp section { if (compress) { printf("\tCompressing data to .tbz archives...\t"); fflush(stdout); if (!(mp->useFilteredImages || mp->stimGroups)) if ((syserr = system("tar -cjf stimuli.tbz *stimuli.dat stimuli.m")) == 0) system("tar -tf stimuli.tbz | xargs rm"); if(mp->nRecordsPL) { if (mp->priorPhases) if ((syserr = system("tar -cjf PPrecords.tbz R*PP_*.dat")) == 0) system("tar -tf PPrecords.tbz | xargs rm"); if ((syserr = system("tar -cjf records.tbz R*.dat")) == 0) system("tar -tf records.tbz | xargs rm"); } if (mp->printConnections) { if (snprintf(syscmd, BUFSIZ, "tar -cjf connectivity.tbz *affNeurons*.dat %s %s",(mp->SOM)?"*dist*.dat":"",(mp->axonDelay)?"*affDelays*.dat":"") >= BUFSIZ) fprintf(stderr, "Warning! 
Undersized buffer: %s", syscmd); syserr = system(syscmd); /*if (mp->SOM) syserr = system("tar -cjf connectivity.tbz *affNeurons*.dat *affDelays*.dat *dist*.dat"); else { if (mp->axonDelay) syserr = system("tar -cjf connectivity.tbz *affNeurons*.dat *affDelays*.dat"); //system("tar --remove-files -cjvf connectivity.tbz *affNeurons*.dat > fileList"); else syserr = system("tar -cjf connectivity.tbz *affNeurons*.dat"); }*/ if (!syserr) system("tar -tf connectivity.tbz | xargs rm"); } if (mp->pretrain) { if (mp->priorPhases) if ((syserr = system("tar -cjf PPpreTraining.tbz PP_pt*.dat")) == 0) system("tar -tf PPpreTraining.tbz | xargs rm"); if ((syserr = system("tar -cjf preTraining.tbz pt*.dat")) == 0) system("tar -tf preTraining.tbz | xargs rm"); } if (mp->train) { if (mp->priorPhases) if ((syserr = system("tar -cjf PPtraining.tbz PP_E*.dat")) == 0) // 2> tar_err system("tar -tf PPtraining.tbz | xargs rm"); if ((syserr = system("tar -cjf training.tbz E*.dat")) == 0) // 2> tar_err system("tar -tf training.tbz | xargs rm"); } if (mp->priorPhases) if ((syserr = system("tar -cjf PPpostTraining.tbz PP_*.dat")) == 0) // 2> tar_err system("tar -tf PPpostTraining.tbz | xargs rm"); if ((syserr = system("tar -cjf postTraining.tbz L*Spikes.dat L*weights*.dat")) == 0) system("tar -tf postTraining.tbz | xargs rm"); //system(syscmd); //system("rm fileList"); printf("Data Compressed!\n"); fflush(stdout); } //#pragma omp section /*if (!SIM.Xgrid) // Print md5 #'s // /sbin/md5 { //system("md5 Spike"); system("md5 parameters.m"); // shasum system("md5 datHashs.txt"); //system("md5 *.tbz"); // Contains metadata (e.g. timestamps) which will give different #s }*/ printf("Computing SHA checksums...\n"); slen = snprintf(syscmd, sizeof(syscmd)-1, "shasum %s", exec); // To Do: Also print hashes for input files. 
#ifndef __llvm__ assert(slen < (signed) sizeof(syscmd)); #endif system(syscmd); system("shasum parameters.m"); system("shasum datHashs.txt"); printf("Checksums computed!\n"); } // End of section // Clean up #pragma omp section if (pf_flag) myfree(pFile); #pragma omp section if (ia_flag) { myfree(imageArchive); if(snprintf(syscmd, BUFSIZ, "rm -R %s/", mp->imgDir) >= BUFSIZ) fprintf(stderr, "*** Warning! Undersized buffer: %s ***", syscmd); system(syscmd); // Delete expand image files } // Print out input/output file list? array of structs with a bool and filename string... #pragma omp section { gsl_rng_free(mSeed); if (mp->noise) { for (th=0; th<nThreads; th++) gsl_rng_free(states[th]); // Free all memory associated with generator myfree(states); } } //if (recFlag) // Free list of records } // End of parallel sections if (mp->useFilteredImages) { myfree(mp->imgDir); myfree(mp->imgList); myfree(mp->vScales); myfree(mp->vOrients); myfree(mp->vPhases); } myfree(mp->vRecords); myfree(mp->vExcit); myfree(mp->vInhib); myfree(mp->pCnxEfE); myfree(mp->pCnxElE); myfree(mp->pCnxIE); myfree(mp->pCnxEI); myfree(mp->pCnxII); myfree(mp->layDim); myfree(mp->vSquare); myfree(mp); #ifdef _OPENMP //getTimeString(timeStr, FNAMEBUFF, omp_get_wtime()-begin); double wtime = omp_get_wtime() - SIM.start; //begin; //double integral; //double fraction = modf(wtime, &integral); //duration = (time_t) round(integral); hours = floor(wtime/3600); wtime -= hours*3600; mins = floor(wtime/60); wtime -= mins*60; //secs = wtime - (mins*60) - (hours*3600); snprintf(timeStr, FNAMEBUFF, "%d:%02d:%06.3lf (%d Threads)",\ hours,mins,wtime,nThreads); #else time_t duration = time(NULL) - start; // finish = round(time(NULL) - start); hours = floor(duration/3600); duration -= hours*3600; mins = floor(duration/60); duration -= mins*60; int secs = duration; snprintf(timeStr, FNAMEBUFF, "%d:%02d:%02d (Serial)",hours,mins,secs); #endif if (result==0) printf("Simulation completed in %s!\n",timeStr); else { 
fprintf(stderr, "*** Simulation aborted after %s! ***\n",timeStr); return 1; } //printf("--------------------------------------------------------------------------------\n"); printf("================================================================================\n"); return 0; }
mod.c
#include <stdio.h>
#include <math.h>
#include "../sailfish.h"

// ============================ COMPAT ========================================
// ============================================================================
#ifdef __ROCM__
#include <hip/hip_runtime.h>
#endif

// When compiling for plain CPU (neither NVCC nor ROCm), the GPU qualifiers
// expand to nothing so the same source builds as ordinary C.
#if !defined(__NVCC__) && !defined(__ROCM__)
#define __device__
#define __host__
#define EXTERN_C
#else
#define EXTERN_C extern "C"
#endif

// ============================ PHYSICS =======================================
// ============================================================================
#define NCONS 3      // conserved fields per zone: surface density, x-momentum, y-momentum
#define PLM_THETA 1.5 // PLM limiter parameter (1.0 = most diffusive, 2.0 = least)

// ============================ MATH ==========================================
// ============================================================================
#define real double
#define min2(a, b) ((a) < (b) ? (a) : (b))
#define max2(a, b) ((a) > (b) ? (a) : (b))
#define min3(a, b, c) min2(a, min2(b, c))
#define max3(a, b, c) max2(a, max2(b, c))
#define sign(x) copysign(1.0, x)
#define minabs(a, b, c) min3(fabs(a), fabs(b), fabs(c))

// Generalized-minmod (theta-limited) slope of a scalar given its left,
// center, and right zone values. Returns 0 at extrema (when the one-sided
// differences disagree in sign), otherwise the smallest-magnitude candidate.
static __host__ __device__ real plm_gradient_scalar(real yl, real y0, real yr)
{
    real a = (y0 - yl) * PLM_THETA;
    real b = (yr - yl) * 0.5;
    real c = (yr - y0) * PLM_THETA;
    // fabs(sign(a) + sign(b)) is 0 or 2; (sign(a) + sign(c)) is -2, 0, or 2;
    // so the prefactor is 0 unless all three slopes share a sign, then +/-1.
    return 0.25 * fabs(sign(a) + sign(b)) * (sign(a) + sign(c)) * minabs(a, b, c);
}

// Applies plm_gradient_scalar to each of the NCONS fields; g receives the
// per-field limited slopes.
static __host__ __device__ void plm_gradient(real *yl, real *y0, real *yr, real *g)
{
    for (int q = 0; q < NCONS; ++q)
    {
        g[q] = plm_gradient_scalar(yl[q], y0[q], yr[q]);
    }
}

// ============================ GRAVITY =======================================
// ============================================================================

// Total gravitational potential at (x1, y1) due to all point masses.
// Each contribution is softened on the mass's radius rs (Plummer-like form
// -M / sqrt(r^2 + rs^2)), so the potential stays finite at r = 0.
static __host__ __device__ real gravitational_potential(
    struct PointMassList *mass_list,
    real x1,
    real y1)
{
    real phi = 0.0;

    for (int p = 0; p < mass_list->count; ++p)
    {
        real x0 = mass_list->masses[p].x;
        real y0 = mass_list->masses[p].y;
        real mp = mass_list->masses[p].mass;
        real rs = mass_list->masses[p].radius;

        real dx = x1 - x0;
        real dy = y1 - y0;
        real r2 = dx * dx + dy * dy;
        real r2_soft = r2 + rs * rs; // softened squared separation
        phi -= mp / sqrt(r2_soft);
    }
    return phi;
}

// Accumulated change in the conserved fields of one zone, over time dt, due
// to a single point mass: softened gravitational force plus a mass sink.
//
// The sink is active only within 4 softening radii, tapering as
// exp(-(dr/rs)^4); mdot is negative (mass removal). How the removed mass
// carries momentum depends on mass->model:
//   AccelerationFree: momentum leaves at the local gas velocity, so the
//                     velocity field is unchanged by accretion.
//   TorqueFree:       momentum leaves at vstar = (radial component of the
//                     gas velocity relative to the mass) + the mass's own
//                     velocity — i.e. the azimuthal (relative) part is left
//                     behind in the gas.
//   ForceFree:        only mass is removed; no momentum goes with it.
//   default:          no source term at all.
//
// Note: callers in this file pass dt = 1.0 to recover rates rather than
// increments (see point_mass_source_term_zone).
static __host__ __device__ void point_mass_source_term(
    struct PointMass *mass,
    real x1,
    real y1,
    real dt,
    real *prim,
    real *delta_cons)
{
    real x0 = mass->x;
    real y0 = mass->y;
    real mp = mass->mass;
    real rs = mass->radius;
    real sigma = prim[0];
    real dx = x1 - x0;
    real dy = y1 - y0;
    real r2 = dx * dx + dy * dy;
    real r2_soft = r2 + rs * rs;
    real dr = sqrt(r2);
    real mag = sigma * mp * pow(r2_soft, -1.5); // |force| / distance, softened
    real fx = -mag * dx;
    real fy = -mag * dy;
    real sink_rate = 0.0;

    if (dr < 4.0 * rs)
    {
        sink_rate = mass->rate * exp(-pow(dr / rs, 4.0));
    }
    real mdot = sigma * sink_rate * -1.0; // negative: mass is removed

    switch (mass->model)
    {
        case AccelerationFree:
            delta_cons[0] = dt * mdot;
            delta_cons[1] = dt * mdot * prim[1] + dt * fx;
            delta_cons[2] = dt * mdot * prim[2] + dt * fy;
            break;
        case TorqueFree: {
            real vx = prim[1];
            real vy = prim[2];
            real vx0 = mass->vx;
            real vy0 = mass->vy;
            // 1e-12 guards against division by zero exactly at the mass.
            real rhatx = dx / (dr + 1e-12);
            real rhaty = dy / (dr + 1e-12);
            real dvdotrhat = (vx - vx0) * rhatx + (vy - vy0) * rhaty;
            real vxstar = dvdotrhat * rhatx + vx0;
            real vystar = dvdotrhat * rhaty + vy0;
            delta_cons[0] = dt * mdot;
            delta_cons[1] = dt * mdot * vxstar + dt * fx;
            delta_cons[2] = dt * mdot * vystar + dt * fy;
            break;
        }
        case ForceFree:
            delta_cons[0] = dt * mdot;
            delta_cons[1] = dt * fx;
            delta_cons[2] = dt * fy;
            break;
        default:
            delta_cons[0] = 0.0;
            delta_cons[1] = 0.0;
            delta_cons[2] = 0.0;
            break;
    }
}

// Sums point_mass_source_term over every mass in the list, accumulating the
// per-mass increments directly into cons (the zone's conserved state).
static __host__ __device__ void point_masses_source_term(
    struct PointMassList *mass_list,
    real x1,
    real y1,
    real dt,
    real *prim,
    real *cons)
{
    for (int p = 0; p < mass_list->count; ++p)
    {
        real delta_cons[NCONS];
        point_mass_source_term(&mass_list->masses[p], x1, y1, dt, prim, delta_cons);

        for (int q = 0; q < NCONS; ++q)
        {
            cons[q] += delta_cons[q];
        }
    }
}

// ============================ EOS AND BUFFER ================================
// ============================================================================
static __host__ __device__
// Squared isothermal sound speed at (x, y).
// Isothermal:        a single global constant from the EOS struct.
// LocallyIsothermal: cs^2 = -phi / Mach^2, using the (negative) softened
//                    potential of all point masses at this location.
// default:           returns 1.0 — flagged WARNING below; unknown EOS types
//                    are not rejected, they silently get unit sound speed.
real sound_speed_squared(
    struct EquationOfState *eos,
    real x,
    real y,
    struct PointMassList *mass_list)
{
    switch (eos->type)
    {
        case Isothermal:
            return eos->isothermal.sound_speed_squared;
        case LocallyIsothermal:
            return -gravitational_potential(mass_list, x, y) / eos->locally_isothermal.mach_number_squared;
        default:
            return 1.0; // WARNING
    }
}

// Wave-damping buffer source term applied to one zone's conserved state.
// Outside onset_radius = outer_radius - onset_width, the state is relaxed
// toward a steady Keplerian target u0 (density = surface_density, azimuthal
// velocity = sqrt(M/r)) at rate driving_rate * Omega(onset_radius) * max(rc, 1).
// Default and Inflow boundary types do nothing here.
static __host__ __device__ void buffer_source_term(
    struct BoundaryCondition *bc,
    real xc,
    real yc,
    real dt,
    real *cons)
{
    switch (bc->type)
    {
        case Default:
        case Inflow:
            break;
        case KeplerianBuffer: {
            real rc = sqrt(xc * xc + yc * yc);
            real surface_density = bc->keplerian_buffer.surface_density;
            real central_mass = bc->keplerian_buffer.central_mass;
            real driving_rate = bc->keplerian_buffer.driving_rate;
            real outer_radius = bc->keplerian_buffer.outer_radius;
            real onset_width = bc->keplerian_buffer.onset_width;
            real onset_radius = outer_radius - onset_width;

            if (rc > onset_radius)
            {
                // Target: circular Keplerian flow (momentum tangent to rc).
                real pf = surface_density * sqrt(central_mass / rc);
                real px = pf * (-yc / rc);
                real py = pf * ( xc / rc);
                real u0[NCONS] = {surface_density, px, py};

                real omega_outer = sqrt(central_mass * pow(onset_radius, -3.0));
                real buffer_rate = driving_rate * omega_outer * max2(rc, 1.0);

                for (int q = 0; q < NCONS; ++q)
                {
                    cons[q] -= (cons[q] - u0[q]) * buffer_rate * dt;
                }
            }
            break;
        }
    }
}

// Traceless shear-strain tensor components from cell-centered x/y gradients
// of the primitive fields (gx[1]/gx[2] = d(vx)/d(vy) per-zone differences in
// x; gy likewise in y). Output s = {sxx, sxy, syx, syy}; syx == sxy.
static __host__ __device__ void shear_strain(const real *gx, const real *gy, real dx, real dy, real *s)
{
    real sxx = 4.0 / 3.0 * gx[1] / dx - 2.0 / 3.0 * gy[2] / dy;
    real syy =-2.0 / 3.0 * gx[1] / dx + 4.0 / 3.0 * gy[2] / dy;
    real sxy = 1.0 / 1.0 * gx[2] / dx + 1.0 / 1.0 * gy[1] / dy;
    real syx = sxy;
    s[0] = sxx;
    s[1] = sxy;
    s[2] = syx;
    s[3] = syy;
}

// ============================ HYDRO =========================================
// ============================================================================

// cons {sigma, px, py} -> prim {sigma, vx, vy}. Each velocity component's
// magnitude is clamped to velocity_ceiling (sign preserved) to keep
// pathological zones from destabilizing the time step.
static __host__ __device__ void conserved_to_primitive(const real *cons, real *prim, real velocity_ceiling)
{
    real rho = cons[0];
    real px = cons[1];
    real py = cons[2];
    real vx = sign(px) * min2(fabs(px / rho), velocity_ceiling);
    real vy = sign(py) * min2(fabs(py / rho), velocity_ceiling);
    prim[0] = rho;
    prim[1] = vx;
    prim[2] = vy;
}

// prim {sigma, vx, vy} -> cons {sigma, px, py}. Exact inverse of the above
// (when no clamping occurs).
static __host__ __device__ void primitive_to_conserved(const real *prim, real *cons)
{
    real rho = prim[0];
    real vx = prim[1];
    real vy = prim[2];
    real px = vx * rho;
    real py = vy * rho;
    cons[0] = rho;
    cons[1] = px;
    cons[2] = py;
}

// Velocity component along the given axis (0 = x, 1 = y; anything else -> 0).
static __host__ __device__ real primitive_to_velocity(const real *prim, int direction)
{
    switch (direction)
    {
        case 0: return prim[1];
        case 1: return prim[2];
        default: return 0.0;
    }
}

// Physical (advective + pressure) flux of the conserved fields through a
// face normal to `direction`, using the isothermal pressure p = rho * cs2.
static __host__ __device__ void primitive_to_flux(
    const real *prim,
    const real *cons,
    real *flux,
    real cs2,
    int direction)
{
    real vn = primitive_to_velocity(prim, direction);
    real rho = prim[0];
    real pressure = rho * cs2;

    flux[0] = vn * cons[0];
    // Pressure contributes only to the momentum component along `direction`.
    flux[1] = vn * cons[1] + pressure * (direction == 0);
    flux[2] = vn * cons[2] + pressure * (direction == 1);
}

// Outermost characteristic speeds vn -/+ cs along the given axis.
static __host__ __device__ void primitive_to_outer_wavespeeds(
    const real *prim,
    real *wavespeeds,
    real cs2,
    int direction)
{
    real cs = sqrt(cs2);
    real vn = primitive_to_velocity(prim, direction);
    wavespeeds[0] = vn - cs;
    wavespeeds[1] = vn + cs;
}

// Largest signal speed in the zone over both axes; used for the CFL limit.
static __host__ __device__ real primitive_max_wavespeed(const real *prim, real cs2)
{
    real cs = sqrt(cs2);
    real vx = prim[1];
    real vy = prim[2];
    real ax = max2(fabs(vx - cs), fabs(vx + cs));
    real ay = max2(fabs(vy - cs), fabs(vy + cs));
    return max2(ax, ay);
}

// HLLE approximate Riemann solver: single-state flux from the left (pl) and
// right (pr) face-reconstructed primitives, with wavespeed bounds clamped to
// include zero so the formula reduces to the upwind flux when appropriate.
static __host__ __device__ void riemann_hlle(const real *pl, const real *pr, real *flux, real cs2, int direction)
{
    real ul[NCONS];
    real ur[NCONS];
    real fl[NCONS];
    real fr[NCONS];
    real al[2];
    real ar[2];

    primitive_to_conserved(pl, ul);
    primitive_to_conserved(pr, ur);
    primitive_to_flux(pl, ul, fl, cs2, direction);
    primitive_to_flux(pr, ur, fr, cs2, direction);
    primitive_to_outer_wavespeeds(pl, al, cs2, direction);
    primitive_to_outer_wavespeeds(pr, ar, cs2, direction);

    const real am = min3(0.0, al[0], ar[0]); // leftmost wavespeed, <= 0
    const real ap = max3(0.0, al[1], ar[1]); // rightmost wavespeed, >= 0

    for
 (int q = 0; q < NCONS; ++q)
    {
        flux[q] = (fl[q] * ap - fr[q] * am - (ul[q] - ur[q]) * ap * am) / (ap - am);
    }
}

// ============================ PATCH =========================================
// ============================================================================
// Iterate the interior index space of a patch; loop variables i, j are
// introduced by the macro and referenced by the loop body.
#define FOR_EACH(p) \
    for (int i = p.start[0]; i < p.start[0] + p.count[0]; ++i) \
    for (int j = p.start[1]; j < p.start[1] + p.count[1]; ++j)
#define FOR_EACH_OMP(p) \
_Pragma("omp parallel for") \
    for (int i = p.start[0]; i < p.start[0] + p.count[0]; ++i) \
    for (int j = p.start[1]; j < p.start[1] + p.count[1]; ++j)
// Pointer to the first field of zone (i, j); indices are relative to start,
// so guard zones have negative logical indices.
#define GET(p, i, j) (p.data + p.jumps[0] * ((i) - p.start[0]) + p.jumps[1] * ((j) - p.start[1]))

// A non-owning view of a 2D array of zones, each holding num_fields reals.
// Layout (from the jumps computed in patch()): i-major, j next, fields
// contiguous within a zone.
struct Patch
{
    int start[2];
    int count[2];
    int jumps[2];
    int num_fields;
    real *data;
};

// Wraps a raw buffer as a Patch over the mesh, with num_guard guard zones on
// every side (start indices become -num_guard).
static struct Patch patch(struct Mesh mesh, int num_fields, int num_guard, real *data)
{
    struct Patch patch;
    patch.start[0] = -num_guard;
    patch.start[1] = -num_guard;
    patch.count[0] = mesh.ni + 2 * num_guard;
    patch.count[1] = mesh.nj + 2 * num_guard;
    patch.jumps[0] = num_fields * patch.count[1];
    patch.jumps[1] = num_fields;
    patch.num_fields = num_fields;
    patch.data = data;
    return patch;
}

// ============================ SCHEME ========================================
// ============================================================================

// Converts the primitive state of zone (i, j) to conserved variables in place
// in the conserved patch.
static __host__ __device__ void primitive_to_conserved_zone(
    struct Patch primitive,
    struct Patch conserved,
    int i,
    int j)
{
    real *p = GET(primitive, i, j);
    real *u = GET(conserved, i, j);
    primitive_to_conserved(p, u);
}

// Advances one zone through a single RK sub-step with viscosity:
// PLM-reconstructed face states -> HLLE fluxes on all four faces -> viscous
// flux corrections from the shear strain -> buffer and point-mass source
// terms -> flux-difference update -> convex blend with the RK register
// (weight a) -> result written back as primitives (with velocity ceiling).
// Requires two guard zones of valid primitive data around (i, j).
static __host__ __device__ void advance_rk_zone(
    struct Mesh mesh,
    struct Patch conserved_rk,
    struct Patch primitive_rd,
    struct Patch primitive_wr,
    struct EquationOfState eos,
    struct BoundaryCondition bc,
    struct PointMassList mass_list,
    real nu,
    real a,
    real dt,
    real velocity_ceiling,
    int i,
    int j)
{
    real dx = mesh.dx;
    real dy = mesh.dy;
    real xl = mesh.x0 + (i + 0.0) * dx;
    real xc = mesh.x0 + (i + 0.5) * dx;
    real xr = mesh.x0 + (i + 1.0) * dx;
    real yl = mesh.y0 + (j + 0.0) * dy;
    real yc = mesh.y0 + (j + 0.5) * dy;
    real yr = mesh.y0 + (j + 1.0) * dy;

    // ------------------------------------------------------------------------
    // Stencil naming: c is this zone; li/ri are x-neighbors, lj/rj are
    // y-neighbors; ki/ti and kj/tj are two zones away; ll/lr/rl/rr are the
    // diagonal corners (needed for transverse gradients at the faces).
    //
    //                                                      tj
    //
    //      +-------+-------+-------+
    //      |       |       |       |
    //      |  lr   |  rj   |  rr   |
    //      |       |       |       |
    //      +-------+-------+-------+
    //      |       |       |       |
    //  ki  |  li  -|+  c  -|+  ri  |  ti
    //      |       |       |       |
    //      +-------+-------+-------+
    //      |       |       |       |
    //      |  ll   |  lj   |  rl   |
    //      |       |       |       |
    //      +-------+-------+-------+
    //
    //              kj
    // ------------------------------------------------------------------------
    real *un = GET(conserved_rk, i, j);
    real *pcc = GET(primitive_rd, i, j);
    real *pli = GET(primitive_rd, i - 1, j);
    real *pri = GET(primitive_rd, i + 1, j);
    real *plj = GET(primitive_rd, i, j - 1);
    real *prj = GET(primitive_rd, i, j + 1);
    real *pki = GET(primitive_rd, i - 2, j);
    real *pti = GET(primitive_rd, i + 2, j);
    real *pkj = GET(primitive_rd, i, j - 2);
    real *ptj = GET(primitive_rd, i, j + 2);
    real *pll = GET(primitive_rd, i - 1, j - 1);
    real *plr = GET(primitive_rd, i - 1, j + 1);
    real *prl = GET(primitive_rd, i + 1, j - 1);
    real *prr = GET(primitive_rd, i + 1, j + 1);

    // Face states: p{l,r}{i,j}{m,p} = minus/plus side of the left/right face
    // along i/j. g??? are per-zone limited gradients.
    real plip[NCONS];
    real plim[NCONS];
    real prip[NCONS];
    real prim[NCONS];
    real pljp[NCONS];
    real pljm[NCONS];
    real prjp[NCONS];
    real prjm[NCONS];
    real gxli[NCONS];
    real gxri[NCONS];
    real gyli[NCONS];
    real gyri[NCONS];
    real gxlj[NCONS];
    real gxrj[NCONS];
    real gylj[NCONS];
    real gyrj[NCONS];
    real gxcc[NCONS];
    real gycc[NCONS];

    plm_gradient(pki, pli, pcc, gxli);
    plm_gradient(pli, pcc, pri, gxcc);
    plm_gradient(pcc, pri, pti, gxri);
    plm_gradient(pkj, plj, pcc, gylj);
    plm_gradient(plj, pcc, prj, gycc);
    plm_gradient(pcc, prj, ptj, gyrj);
    // Transverse gradients in the neighbor zones (for viscous stresses).
    plm_gradient(pll, pli, plr, gyli);
    plm_gradient(prl, pri, prr, gyri);
    plm_gradient(pll, plj, prl, gxlj);
    plm_gradient(plr, prj, prr, gxrj);

    for (int q = 0; q < NCONS; ++q)
    {
        plim[q] = pli[q] + 0.5 * gxli[q];
        plip[q] = pcc[q] - 0.5 * gxcc[q];
        prim[q] = pcc[q] + 0.5 * gxcc[q];
        prip[q] = pri[q] - 0.5 * gxri[q];
        pljm[q] = plj[q] + 0.5 * gylj[q];
        pljp[q] = pcc[q] - 0.5 * gycc[q];
        prjm[q] = pcc[q] + 0.5 * gycc[q];
        prjp[q] = prj[q] - 0.5 * gyrj[q];
    }

    real fli[NCONS];
    real fri[NCONS];
    real flj[NCONS];
    real frj[NCONS];
    real ucc[NCONS];

    // Sound speed is evaluated at each face center (and cell center below).
    real cs2li = sound_speed_squared(&eos, xl, yc, &mass_list);
    real cs2ri = sound_speed_squared(&eos, xr, yc, &mass_list);
    real cs2lj = sound_speed_squared(&eos, xc, yl, &mass_list);
    real cs2rj = sound_speed_squared(&eos, xc, yr, &mass_list);

    riemann_hlle(plim, plip, fli, cs2li, 0);
    riemann_hlle(prim, prip, fri, cs2ri, 0);
    riemann_hlle(pljm, pljp, flj, cs2lj, 1);
    riemann_hlle(prjm, prjp, frj, cs2rj, 1);

    // Viscous flux: face stress is the average of the two adjacent zones'
    // (density * strain) products, scaled by the kinematic viscosity nu.
    real sli[4];
    real sri[4];
    real slj[4];
    real srj[4];
    real scc[4];
    shear_strain(gxli, gyli, dx, dy, sli);
    shear_strain(gxri, gyri, dx, dy, sri);
    shear_strain(gxlj, gylj, dx, dy, slj);
    shear_strain(gxrj, gyrj, dx, dy, srj);
    shear_strain(gxcc, gycc, dx, dy, scc);

    fli[1] -= 0.5 * nu * (pli[0] * sli[0] + pcc[0] * scc[0]); // x-x
    fli[2] -= 0.5 * nu * (pli[0] * sli[1] + pcc[0] * scc[1]); // x-y
    fri[1] -= 0.5 * nu * (pcc[0] * scc[0] + pri[0] * sri[0]); // x-x
    fri[2] -= 0.5 * nu * (pcc[0] * scc[1] + pri[0] * sri[1]); // x-y
    flj[1] -= 0.5 * nu * (plj[0] * slj[2] + pcc[0] * scc[2]); // y-x
    flj[2] -= 0.5 * nu * (plj[0] * slj[3] + pcc[0] * scc[3]); // y-y
    frj[1] -= 0.5 * nu * (pcc[0] * scc[2] + prj[0] * srj[2]); // y-x
    frj[2] -= 0.5 * nu * (pcc[0] * scc[3] + prj[0] * srj[3]); // y-y

    primitive_to_conserved(pcc, ucc);
    buffer_source_term(&bc, xc, yc, dt, ucc);
    point_masses_source_term(&mass_list, xc, yc, dt, pcc, ucc);

    for (int q = 0; q < NCONS; ++q)
    {
        ucc[q] -= ((fri[q] - fli[q]) / dx + (frj[q] - flj[q]) / dy) * dt;
        // RK averaging: a = 0 is a plain forward step; a > 0 blends with the
        // cached register un (e.g. midpoint/SSP-RK stages).
        ucc[q] = (1.0 - a) * ucc[q] + a * un[q];
    }
    real *pout = GET(primitive_wr, i, j);
    conserved_to_primitive(ucc, pout, velocity_ceiling);
}

// Inviscid variant of advance_rk_zone: identical structure minus the shear
// strain / viscous flux machinery (and the diagonal stencil reads it needs).
// Kept as a separate function rather than branching on nu == 0 inside the
// hot loop.
static __host__ __device__ void advance_rk_zone_inviscid(
    struct Mesh mesh,
    struct Patch conserved_rk,
    struct Patch primitive_rd,
    struct Patch primitive_wr,
    struct EquationOfState eos,
    struct BoundaryCondition bc,
    struct PointMassList mass_list,
    real a,
    real dt,
 real velocity_ceiling,
    int i,
    int j)
{
    real dx = mesh.dx;
    real dy = mesh.dy;
    real xl = mesh.x0 + (i + 0.0) * dx;
    real xc = mesh.x0 + (i + 0.5) * dx;
    real xr = mesh.x0 + (i + 1.0) * dx;
    real yl = mesh.y0 + (j + 0.0) * dy;
    real yc = mesh.y0 + (j + 0.5) * dy;
    real yr = mesh.y0 + (j + 1.0) * dy;

    // Same stencil naming as advance_rk_zone, but no diagonal corners are
    // needed without the viscous stress terms.
    real *un = GET(conserved_rk, i, j);
    real *pcc = GET(primitive_rd, i, j);
    real *pli = GET(primitive_rd, i - 1, j);
    real *pri = GET(primitive_rd, i + 1, j);
    real *plj = GET(primitive_rd, i, j - 1);
    real *prj = GET(primitive_rd, i, j + 1);
    real *pki = GET(primitive_rd, i - 2, j);
    real *pti = GET(primitive_rd, i + 2, j);
    real *pkj = GET(primitive_rd, i, j - 2);
    real *ptj = GET(primitive_rd, i, j + 2);

    real plip[NCONS];
    real plim[NCONS];
    real prip[NCONS];
    real prim[NCONS];
    real pljp[NCONS];
    real pljm[NCONS];
    real prjp[NCONS];
    real prjm[NCONS];
    real gxli[NCONS];
    real gxri[NCONS];
    real gylj[NCONS];
    real gyrj[NCONS];
    real gxcc[NCONS];
    real gycc[NCONS];

    plm_gradient(pki, pli, pcc, gxli);
    plm_gradient(pli, pcc, pri, gxcc);
    plm_gradient(pcc, pri, pti, gxri);
    plm_gradient(pkj, plj, pcc, gylj);
    plm_gradient(plj, pcc, prj, gycc);
    plm_gradient(pcc, prj, ptj, gyrj);

    // PLM face states on the four faces of the zone.
    for (int q = 0; q < NCONS; ++q)
    {
        plim[q] = pli[q] + 0.5 * gxli[q];
        plip[q] = pcc[q] - 0.5 * gxcc[q];
        prim[q] = pcc[q] + 0.5 * gxcc[q];
        prip[q] = pri[q] - 0.5 * gxri[q];
        pljm[q] = plj[q] + 0.5 * gylj[q];
        pljp[q] = pcc[q] - 0.5 * gycc[q];
        prjm[q] = pcc[q] + 0.5 * gycc[q];
        prjp[q] = prj[q] - 0.5 * gyrj[q];
    }
    real fli[NCONS];
    real fri[NCONS];
    real flj[NCONS];
    real frj[NCONS];
    real ucc[NCONS];

    real cs2li = sound_speed_squared(&eos, xl, yc, &mass_list);
    real cs2ri = sound_speed_squared(&eos, xr, yc, &mass_list);
    real cs2lj = sound_speed_squared(&eos, xc, yl, &mass_list);
    real cs2rj = sound_speed_squared(&eos, xc, yr, &mass_list);

    riemann_hlle(plim, plip, fli, cs2li, 0);
    riemann_hlle(prim, prip, fri, cs2ri, 0);
    riemann_hlle(pljm, pljp, flj, cs2lj, 1);
    riemann_hlle(prjm, prjp, frj, cs2rj, 1);

    primitive_to_conserved(pcc, ucc);
    buffer_source_term(&bc, xc, yc, dt, ucc);
    point_masses_source_term(&mass_list, xc, yc, dt, pcc, ucc);

    for (int q = 0; q < NCONS; ++q)
    {
        ucc[q] -= ((fri[q] - fli[q]) / dx + (frj[q] - flj[q]) / dy) * dt;
        ucc[q] = (1.0 - a) * ucc[q] + a * un[q]; // RK blend with the register
    }
    real *pout = GET(primitive_wr, i, j);
    conserved_to_primitive(ucc, pout, velocity_ceiling);
}

// Writes the per-zone source-term *rate* due to one point mass into
// cons_rate: passing dt = 1.0 makes point_mass_source_term return d(cons)/dt
// rather than an increment.
static __host__ __device__ void point_mass_source_term_zone(
    struct Mesh mesh,
    struct Patch primitive,
    struct Patch cons_rate,
    struct PointMass mass,
    int i,
    int j)
{
    real *pc = GET(primitive, i, j);
    real *sc = GET(cons_rate, i, j);
    real x = mesh.x0 + (i + 0.5) * mesh.dx;
    real y = mesh.y0 + (j + 0.5) * mesh.dy;
    point_mass_source_term(&mass, x, y, 1.0, pc, sc);
}

// Stores the zone's maximum signal speed (for CFL-limited time stepping)
// into the single-field wavespeed patch.
static __host__ __device__ void wavespeed_zone(
    struct Mesh mesh,
    struct EquationOfState eos,
    struct Patch primitive,
    struct Patch wavespeed,
    struct PointMassList mass_list,
    int i,
    int j)
{
    real *pc = GET(primitive, i, j);
    real x = mesh.x0 + (i + 0.5) * mesh.dx;
    real y = mesh.y0 + (j + 0.5) * mesh.dy;
    real cs2 = sound_speed_squared(&eos, x, y, &mass_list);
    real a = primitive_max_wavespeed(pc, cs2);
    GET(wavespeed, i, j)[0] = a;
}

// ============================ KERNELS =======================================
// ============================================================================
// GPU launch wrappers: one thread per zone, x-threads mapped to j (the
// fastest-varying index) and y-threads to i; out-of-range threads exit.
#if defined(__NVCC__) || defined(__ROCM__)

static void __global__ primitive_to_conserved_kernel(
    struct Mesh mesh,
    struct Patch primitive,
    struct Patch conserved)
{
    int i = threadIdx.y + blockIdx.y * blockDim.y;
    int j = threadIdx.x + blockIdx.x * blockDim.x;

    if (i < mesh.ni && j < mesh.nj)
    {
        primitive_to_conserved_zone(primitive, conserved, i, j);
    }
}

static void __global__ advance_rk_kernel(
    struct Mesh mesh,
    struct Patch conserved_rk,
    struct Patch primitive_rd,
    struct Patch primitive_wr,
    struct EquationOfState eos,
    struct BoundaryCondition buffer,
    struct PointMassList mass_list,
    real nu,
    real a,
    real dt,
    real velocity_ceiling)
{
    int i = threadIdx.y + blockIdx.y * blockDim.y;
    int j =
 threadIdx.x + blockIdx.x * blockDim.x;

    if (i < mesh.ni && j < mesh.nj)
    {
        advance_rk_zone(
            mesh,
            conserved_rk,
            primitive_rd,
            primitive_wr,
            eos,
            buffer,
            mass_list,
            nu,
            a,
            dt,
            velocity_ceiling,
            i, j
        );
    }
}

static void __global__ advance_rk_kernel_inviscid(
    struct Mesh mesh,
    struct Patch conserved_rk,
    struct Patch primitive_rd,
    struct Patch primitive_wr,
    struct EquationOfState eos,
    struct BoundaryCondition buffer,
    struct PointMassList mass_list,
    real a,
    real dt,
    real velocity_ceiling)
{
    int i = threadIdx.y + blockIdx.y * blockDim.y;
    int j = threadIdx.x + blockIdx.x * blockDim.x;

    if (i < mesh.ni && j < mesh.nj)
    {
        advance_rk_zone_inviscid(
            mesh,
            conserved_rk,
            primitive_rd,
            primitive_wr,
            eos,
            buffer,
            mass_list,
            a,
            dt,
            velocity_ceiling,
            i, j
        );
    }
}

static void __global__ point_mass_source_term_kernel(
    struct Mesh mesh,
    struct Patch primitive,
    struct Patch cons_rate,
    struct PointMass mass)
{
    int i = threadIdx.y + blockIdx.y * blockDim.y;
    int j = threadIdx.x + blockIdx.x * blockDim.x;

    if (i < mesh.ni && j < mesh.nj)
    {
        point_mass_source_term_zone(mesh, primitive, cons_rate, mass, i, j);
    }
}

static void __global__ wavespeed_kernel(
    struct Mesh mesh,
    struct EquationOfState eos,
    struct Patch primitive,
    struct Patch wavespeed,
    struct PointMassList mass_list)
{
    int i = threadIdx.y + blockIdx.y * blockDim.y;
    int j = threadIdx.x + blockIdx.x * blockDim.x;

    if (i < mesh.ni && j < mesh.nj)
    {
        wavespeed_zone(mesh, eos, primitive, wavespeed, mass_list, i, j);
    }
}

#endif

// ============================ PUBLIC API ====================================
// ============================================================================

/**
 * Converts an array of primitive data to an array of conserved data. The
 * array index space must follow the descriptions below.
 * @param mesh               The mesh [ni, nj]
 * @param primitive_ptr[in]  [-2, -2] [ni + 4, nj + 4] [3]
 * @param conserved_ptr[out] [ 0,  0] [ni,     nj    ] [3]
 * @param mode               The execution mode
 */
EXTERN_C void iso2d_primitive_to_conserved(
    struct Mesh mesh,
    real *primitive_ptr,
    real *conserved_ptr,
    enum ExecutionMode mode)
{
    struct Patch primitive = patch(mesh, NCONS, 2, primitive_ptr);
    struct Patch conserved = patch(mesh, NCONS, 0, conserved_ptr);

    switch (mode) {
        case CPU: {
            FOR_EACH(conserved)
            {
                primitive_to_conserved_zone(primitive, conserved, i, j);
            }
            break;
        }

        case OMP: {
            // NOTE(review): if the library was built without OpenMP, this
            // case silently does nothing rather than falling back to CPU.
#ifdef _OPENMP
            FOR_EACH_OMP(conserved)
            {
                primitive_to_conserved_zone(primitive, conserved, i, j);
            }
#endif
            break;
        }

        case GPU: {
            // Same caveat: a non-GPU build silently ignores GPU mode.
#if defined(__NVCC__) || defined(__ROCM__)
            dim3 bs = dim3(16, 16);
            dim3 bd = dim3((mesh.nj + bs.x - 1) / bs.x, (mesh.ni + bs.y - 1) / bs.y);
            primitive_to_conserved_kernel<<<bd, bs>>>(mesh, primitive, conserved);
#endif
            break;
        }
    }
}

/**
 * Updates an array of primitive data by advancing it a single Runge-Kutta
 * step.
 * @param mesh                  The mesh [ni, nj]
 * @param conserved_rk_ptr[in]  [ 0,  0] [ni,     nj    ] [3]
 * @param primitive_rd_ptr[in]  [-2, -2] [ni + 4, nj + 4] [3]
 * @param primitive_wr_ptr[out] [-2, -2] [ni + 4, nj + 4] [3]
 * @param eos                   The EOS
 * @param buffer                The buffer region
 * @param mass_list             A list of point mass objects
 * @param nu                    The viscosity coefficient
 * @param a                     The RK averaging parameter
 * @param dt                    The time step
 * @param mode                  The execution mode
 */
EXTERN_C void iso2d_advance_rk(
    struct Mesh mesh,
    real *conserved_rk_ptr,
    real *primitive_rd_ptr,
    real *primitive_wr_ptr,
    struct EquationOfState eos,
    struct BoundaryCondition buffer,
    struct PointMassList mass_list,
    real nu,
    real a,
    real dt,
    real velocity_ceiling,
    enum ExecutionMode mode)
{
    struct Patch conserved_rk = patch(mesh, NCONS, 0, conserved_rk_ptr);
    struct Patch primitive_rd = patch(mesh, NCONS, 2, primitive_rd_ptr);
    struct Patch primitive_wr = patch(mesh, NCONS, 2, primitive_wr_ptr);

    switch (mode) {
        case CPU: {
            // nu == 0.0 dispatches to the cheaper inviscid zone update.
            if (nu == 0.0)
            {
                FOR_EACH(conserved_rk)
                {
                    advance_rk_zone_inviscid(
                        mesh,
                        conserved_rk,
                        primitive_rd,
                        primitive_wr,
                        eos,
                        buffer,
                        mass_list,
                        a,
                        dt,
                        velocity_ceiling,
                        i, j
                    );
                }
            }
            else
            {
                FOR_EACH(conserved_rk)
                {
                    advance_rk_zone(
                        mesh,
                        conserved_rk,
                        primitive_rd,
                        primitive_wr,
                        eos,
                        buffer,
                        mass_list,
                        nu,
                        a,
                        dt,
                        velocity_ceiling,
                        i, j);
                }
            }
            break;
        }

        case OMP: {
            // NOTE(review): without _OPENMP this case is a silent no-op; the
            // second `break` below is unreachable when _OPENMP is defined.
#ifdef _OPENMP
            if (nu == 0.0)
            {
                FOR_EACH_OMP(conserved_rk)
                {
                    advance_rk_zone_inviscid(
                        mesh,
                        conserved_rk,
                        primitive_rd,
                        primitive_wr,
                        eos,
                        buffer,
                        mass_list,
                        a,
                        dt,
                        velocity_ceiling,
                        i, j);
                }
            }
            else
            {
                FOR_EACH_OMP(conserved_rk)
                {
                    advance_rk_zone(
                        mesh,
                        conserved_rk,
                        primitive_rd,
                        primitive_wr,
                        eos,
                        buffer,
                        mass_list,
                        nu,
                        a,
                        dt,
                        velocity_ceiling,
                        i, j);
                }
            }
            break;
#endif
            break;
        }

        case GPU: {
#if defined(__NVCC__) || defined(__ROCM__)
            dim3 bs = dim3(16, 16);
            dim3 bd = dim3((mesh.nj + bs.x - 1) / bs.x, (mesh.ni + bs.y - 1) / bs.y);
            if (nu == 0.0)
            {
                advance_rk_kernel_inviscid<<<bd, bs>>>(
                    mesh,
                    conserved_rk,
                    primitive_rd,
                    primitive_wr,
                    eos,
                    buffer,
                    mass_list,
                    a,
dt, velocity_ceiling ); } else { advance_rk_kernel<<<bd, bs>>>( mesh, conserved_rk, primitive_rd, primitive_wr, eos, buffer, mass_list, nu, a, dt, velocity_ceiling ); } #endif break; } } } /** * Fill a buffer with the source terms that would result from a single point * mass. The result is the rate of surface density addition (will be negative * for positive sink rate), and the gravitational force surface densities in * each zone. * @param mesh The mesh [ni, nj] * @param primitive_ptr[in] [-2, -2] [ni + 4, nj + 4] [3] * @param cons_rate_ptr[out] [ 0, 0] [ni, nj] [1] * @param mass A point mass * @param mode The execution mode */ EXTERN_C void iso2d_point_mass_source_term( struct Mesh mesh, real *primitive_ptr, real *cons_rate_ptr, struct PointMass mass, enum ExecutionMode mode) { struct Patch primitive = patch(mesh, NCONS, 2, primitive_ptr); struct Patch cons_rate = patch(mesh, NCONS, 0, cons_rate_ptr); switch (mode) { case CPU: { FOR_EACH(cons_rate) { point_mass_source_term_zone(mesh, primitive, cons_rate, mass, i, j); } break; } case OMP: { #ifdef _OPENMP FOR_EACH_OMP(cons_rate) { point_mass_source_term_zone(mesh, primitive, cons_rate, mass, i, j); } #endif break; } case GPU: { #if defined(__NVCC__) || defined(__ROCM__) dim3 bs = dim3(16, 16); dim3 bd = dim3((mesh.nj + bs.x - 1) / bs.x, (mesh.ni + bs.y - 1) / bs.y); point_mass_source_term_kernel<<<bd, bs>>>(mesh, primitive, cons_rate, mass); #endif break; } } } /** * Fill a buffer with the maximum wavespeed in each zone. 
* @param mesh The mesh [ni, nj] * @param primitive_ptr[in] [-2, -2] [ni + 4, nj + 4] [3] * @param wavespeed_ptr[out] [ 0, 0] [ni, nj] [1] * @param eos The EOS * @param mass_list A list of point mass objects * @param mode The execution mode */ EXTERN_C void iso2d_wavespeed( struct Mesh mesh, real *primitive_ptr, real *wavespeed_ptr, struct EquationOfState eos, struct PointMassList mass_list, enum ExecutionMode mode) { struct Patch primitive = patch(mesh, NCONS, 2, primitive_ptr); struct Patch wavespeed = patch(mesh, 1, 0, wavespeed_ptr); switch (mode) { case CPU: { FOR_EACH(wavespeed) { wavespeed_zone(mesh, eos, primitive, wavespeed, mass_list, i, j); } break; } case OMP: { #ifdef _OPENMP FOR_EACH_OMP(wavespeed) { wavespeed_zone(mesh, eos, primitive, wavespeed, mass_list, i, j); } #endif break; } case GPU: { #if defined(__NVCC__) || defined(__ROCM__) dim3 bs = dim3(16, 16); dim3 bd = dim3((mesh.nj + bs.x - 1) / bs.x, (mesh.ni + bs.y - 1) / bs.y); wavespeed_kernel<<<bd, bs>>>(mesh, eos, primitive, wavespeed, mass_list); #endif break; } } } /** * Obtain the maximum value in an array of double's, using either a sequential * or an OpenMP reduction. Not implemented for GPU execution. * * @param data The data [size] * @param size The number of elements * @param mode The execution mode */ EXTERN_C real iso2d_maximum( real *data, unsigned long size, enum ExecutionMode mode) { real a_max = 0.0; switch (mode) { case CPU: { for (unsigned long i = 0; i < size; ++i) { a_max = max2(a_max, data[i]); } break; } case OMP: { #ifdef _OPENMP #pragma omp parallel for reduction(max:a_max) for (unsigned long i = 0; i < size; ++i) { a_max = max2(a_max, data[i]); } #endif break; } case GPU: break; // Not implemented, use iso2d_wavespeed // followed by a GPU reduction. } return a_max; }
GB_binop__lor_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__lor_uint64 // A.*B function (eWiseMult): GB_AemultB__lor_uint64 // A*D function (colscale): GB_AxD__lor_uint64 // D*A function (rowscale): GB_DxB__lor_uint64 // C+=B function (dense accum): GB_Cdense_accumB__lor_uint64 // C+=b function (dense accum): GB_Cdense_accumb__lor_uint64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lor_uint64 // C=scalar+B GB_bind1st__lor_uint64 // C=scalar+B' GB_bind1st_tran__lor_uint64 // C=A+scalar GB_bind2nd__lor_uint64 // C=A'+scalar GB_bind2nd_tran__lor_uint64 // C type: uint64_t // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = ((aij != 0) || (bij != 0)) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t 
// cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = ((x != 0) || (y != 0)) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOR || GxB_NO_UINT64 || GxB_NO_LOR_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// (review note: this ewise3-accum variant does not exist for the LOR
// operator; the generator leaves the placeholder disabled via `#if 0` above.)
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__lor_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// The *_slice arrays describe how the entries of B have been partitioned
// into ntasks parallel tasks (see GB_ek_slice).
GrB_Info GB_Cdense_accumB__lor_uint64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__lor_uint64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // (review note: unreachable duplicate return, kept as emitted by the
    // code generator)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__lor_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    bool A_is_pattern,
    const GrB_Matrix D,
    bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__lor_uint64
(
    GrB_Matrix C,
    const GrB_Matrix D,
    bool D_is_pattern,
    const GrB_Matrix B,
    bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Frees the ek-slice workspaces allocated by the add/emult templates below.
#undef  GB_FREE_ALL
#define GB_FREE_ALL \
{ \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}

GrB_Info GB_AaddB__lor_uint64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__lor_uint64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__lor_uint64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Bb (if B is bitmap)
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = Bx [p] ;
        Cx [p] = ((x != 0) || (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__lor_uint64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab (if A is bitmap)
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = Ax [p] ;
        Cx [p] = ((aij != 0) || (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint64_t aij = Ax [pA] ; \
    Cx [pC] = ((x != 0) || (aij != 0)) ; \
}

GrB_Info GB_bind1st_tran__lor_uint64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint64_t aij = Ax [pA] ; \
    Cx [pC] = ((aij != 0) || (y != 0)) ; \
}

GrB_Info GB_bind2nd_tran__lor_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
transforms.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <complex.h> #include <omp.h> #include "logger.h" //void morlet(int ndata, int n_nu, int n_eta, double conv_ext, double fourier_b, // double *data, double *nu, double *eta, double complex *out){ // // Discrete Morlet Wavelet transform, using Morlet basis from Goupillaud 1984 (Eq. 5, 6 - with b=2pi) // // int ix, jnuc,jeta, jnu, thisn; // double exponent, mag, extent, dt; // // dt = nu[1] - nu[0]; // // double sqrt2 = sqrt(2.0); // int index = 0; // // for (ix=0;ix<ndata;ix++){ // for (jnuc=0;jnuc<n_nu;jnuc++){ // for (jeta=0; jeta<n_eta;jeta++){ // extent = 1/(eta[jeta]*sqrt2); // thisn = ceil(conv_ext*extent/dt); // // for (jnu=fmax(0, jnuc-thisn); jnu<fmin(jnuc+thisn, n_nu); jnu++){ // exponent = eta[jeta]*(nu[jnu] - nu[jnuc]); // out[index] += data[ix*n_nu + jnu]*cexp(-exponent*(exponent/2 + fourier_b*I)); // } // index++; // } // } // } //} inline int max(int a, int b) { return a > b ? a : b; } inline int min(int a, int b) { return a < b ? a : b; } void cmorlet(unsigned int ndata, unsigned int n_nu, unsigned int n_eta, double conv_ext, double fourier_b, double complex *data, double *nu, double *eta, int nthreads, double complex *out){ /* Discrete Morlet Wavelet transform ================================= Uses Morlet basis from Goupillaud 1984 (Eq. 5, 6 - with b=2pi) Notes ----- The SHAPE of any of the below args indicates the *ordering* of the raveled array, with last axis moving first. Args ---- ndata (int) : Number of different data sets to be transformed (each is independent) n_nu (int) : Number of real-space cells (eg. frequencies, in terms of visibilities) n_eta (int) : Number of fourier-space cells to transform to, should be ~1/2 n_nu conv_ext (double) : Convergence extent. The number of Morlet kernel sigma "widths" to actually perform integration for. Should be ~5 or more. fourier_b (double) : The Fourier convention, i.e. the Fourier kernel is e^{-bi nu*eta}. 
data (double complex, SHAPE=[n_nu, ndata]) : The input (complex) data. nu (double, SHAPE=[n_nu]): Real-space co-ordinates (i.e. frequencies, in terms of visibilities) nthreads (int) : Number of threads to use in OMP. eta (double, SHAPE=[n_eta]): Fourier-space co-ordinates (dual of nu). Returns ------- out (double complex, SHAPE=[n_eta, n_nu, ndata]): The resulting Morlet transform. */ unsigned int ix, jnuc,jeta, jnu, jidx, jmin, jmax; double exponent; double complex xx; int thisn; double sqrt2 = sqrt(2.0); unsigned int out_idx = 0; unsigned int data_idx = 0; double sqrt2dnu = sqrt2*(nu[1] - nu[0]); omp_set_num_threads(nthreads); #pragma omp parallel for private(thisn, jidx, out_idx, jnuc, jmin, jmax, data_idx, exponent, xx, jnu, ix) for (jeta=0; jeta<n_eta;jeta++){ // Loop through eta thisn = ceil(conv_ext/(eta[jeta]*sqrt2dnu)); // We do this to be able to multi-thread jidx = jeta * n_nu * ndata; out_idx = 0; LOG_DEBUG("jeta=%d, jidx=%d, thisn=%d", jeta, jidx, thisn); for (jnuc=0;jnuc<n_nu;jnuc++){ // Loop through nu_centre jmin = max(0, jnuc-thisn); jmax = min(jnuc+thisn, n_nu); data_idx = jmin*ndata; LOG_SUPER_DEBUG("jnuc=%d, jmin=%d, jmax=%d", jnuc, jmin, jmax); for (jnu=jmin; jnu<jmax; jnu++){ // Loop through nu (i.e. 
do the FT) exponent = eta[jeta]*(nu[jnu] - nu[jnuc]); xx = cexp(-exponent*(exponent/2 + fourier_b*I)); for (ix=0;ix<ndata;ix++){ // Loop through different data // if(jidx + out_idx >= n_eta*n_nu*ndata){ // printf("Out of bounds on: jeta=%d, jnuc=%d, jnu=%d, ix=%d, jidx=%d, out_idx=%d\n", jeta, jnuc, jnu, ix, jidx, out_idx); // } out[jidx + out_idx] += data[data_idx]*xx; if(jeta==(n_eta-1) && jnuc==(n_nu-1) && ix==(ndata-1)) LOG_ULTRA_DEBUG("\t\tjnu=%d ix=%d indx=%d jidx=%d, out_idx=%d, data=%g + %gi xx=%g out=%g + %gi", jnu, ix, jidx+out_idx, jidx, out_idx, creal(data[data_idx]), cimag(data[data_idx]), xx, creal(out[jidx + out_idx]), cimag(out[jidx + out_idx])); data_idx++; out_idx++; } out_idx -= ndata; // out_idx should not contain jnu, so reset it. } out_idx += ndata; } } } void BlackmanHarris_cmorlet(unsigned int ndata, unsigned int n_nu, unsigned int n_eta, double conv_ext, double *BlackmanHarrisFilter, double complex *data, double *nu, double *eta, int nthreads, double complex *out){ /* Discrete Morlet Wavelet transform but replacing Gaussian filter with BlackmanHarris. BlackmanHarris form taken from scipy.signal.windown.blackmanharris (copied below) https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.blackmanharris.html#scipy.signal.windows.blackmanharris Otherwise as above. 
================================= */ unsigned int ix, jnuc,jeta, jnu, jidx, jmin, jmax; double exponent; double complex xx; int thisn; double sqrt2 = sqrt(2.0); unsigned int out_idx = 0; unsigned int data_idx = 0; double sqrt2dnu = sqrt2*(nu[1] - nu[0]); omp_set_num_threads(nthreads); #pragma omp parallel for private(thisn, jidx, out_idx, jnuc, jmin, jmax, data_idx, exponent, xx, jnu, ix) for (jeta=0; jeta<n_eta;jeta++){ // Loop through eta thisn = ceil(conv_ext/(eta[jeta]*sqrt2dnu)); // We do this to be able to multi-thread jidx = jeta * n_nu * ndata; out_idx = 0; LOG_DEBUG("jeta=%d, jidx=%d, thisn=%d", jeta, jidx, thisn); for (jnuc=0;jnuc<n_nu;jnuc++){ // Loop through nu_centre jmin = max(0, jnuc-thisn); jmax = min(jnuc+thisn, n_nu); data_idx = jmin*ndata; LOG_SUPER_DEBUG("jnuc=%d, jmin=%d, jmax=%d", jnuc, jmin, jmax); for (jnu=jmin; jnu<jmax; jnu++){ // Loop through nu (i.e. do the FT) exponent = eta[jeta]*(nu[jnu] - nu[jnuc]); //xx = cexp(-exponent*(exponent/2 + fourier_b*I)); // --Filter change Fourier to blackmanharris. //general_cosine(M, [0.35875, 0.48829, 0.14128, 0.01168], sym) xx = cexp(-exponent*exponent/2 - BlackmanHarrisFilter[jnuc]); for (ix=0;ix<ndata;ix++){ // Loop through different data // if(jidx + out_idx >= n_eta*n_nu*ndata){ // printf("Out of bounds on: jeta=%d, jnuc=%d, jnu=%d, ix=%d, jidx=%d, out_idx=%d\n", jeta, jnuc, jnu, ix, jidx, out_idx); // } out[jidx + out_idx] += data[data_idx]*xx; if(jeta==(n_eta-1) && jnuc==(n_nu-1) && ix==(ndata-1)) LOG_ULTRA_DEBUG("\t\tjnu=%d ix=%d indx=%d jidx=%d, out_idx=%d, data=%g + %gi xx=%g out=%g + %gi", jnu, ix, jidx+out_idx, jidx, out_idx, creal(data[data_idx]), cimag(data[data_idx]), xx, creal(out[jidx + out_idx]), cimag(out[jidx + out_idx])); data_idx++; out_idx++; } out_idx -= ndata; // out_idx should not contain jnu, so reset it. } out_idx += ndata; } } } /* def blackmanharris(M, sym=True): """Return a minimum 4-term Blackman-Harris window. 
Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. Returns ------- w : ndarray The window, with the maximum value normalized to 1 (though the value 1 does not appear if `M` is even and `sym` is True). Examples -------- Plot the window and its frequency response: >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.blackmanharris(51) >>> plt.plot(window) >>> plt.title("Blackman-Harris window") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title("Frequency response of the Blackman-Harris window") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") """ return general_cosine(M, [0.35875, 0.48829, 0.14128, 0.01168], sym) def general_cosine(M, a, sym=True): r""" Generic weighted sum of cosine terms window Parameters ---------- M : int Number of points in the output window a : array_like Sequence of weighting coefficients. This uses the convention of being centered on the origin, so these will typically all be positive numbers, not alternating sign. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. References ---------- .. [1] A. Nuttall, "Some windows with very good sidelobe behavior," IEEE Transactions on Acoustics, Speech, and Signal Processing, vol. 29, no. 1, pp. 84-91, Feb 1981. :doi:`10.1109/TASSP.1981.1163506`. .. [2] Heinzel G. 
et al., "Spectrum and spectral density estimation by the Discrete Fourier transform (DFT), including a comprehensive list of window functions and some new flat-top windows", February 15, 2002 https://holometer.fnal.gov/GH_FFT.pdf Examples -------- Heinzel describes a flat-top window named "HFT90D" with formula: [2]_ .. math:: w_j = 1 - 1.942604 \cos(z) + 1.340318 \cos(2z) - 0.440811 \cos(3z) + 0.043097 \cos(4z) where .. math:: z = \frac{2 \pi j}{N}, j = 0...N - 1 Since this uses the convention of starting at the origin, to reproduce the window, we need to convert every other coefficient to a positive number: >>> HFT90D = [1, 1.942604, 1.340318, 0.440811, 0.043097] The paper states that the highest sidelobe is at -90.2 dB. Reproduce Figure 42 by plotting the window and its frequency response, and confirm the sidelobe level in red: >>> from scipy.signal.windows import general_cosine >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = general_cosine(1000, HFT90D, sym=False) >>> plt.plot(window) >>> plt.title("HFT90D window") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.figure() >>> A = fft(window, 10000) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = np.abs(fftshift(A / abs(A).max())) >>> response = 20 * np.log10(np.maximum(response, 1e-10)) >>> plt.plot(freq, response) >>> plt.axis([-50/1000, 50/1000, -140, 0]) >>> plt.title("Frequency response of the HFT90D window") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") >>> plt.axhline(-90.2, color='red') >>> plt.show() """ if _len_guards(M): return np.ones(M) M, needs_trunc = _extend(M, sym) fac = np.linspace(-np.pi, np.pi, M) w = np.zeros(M) for k in range(len(a)): w += a[k] * np.cos(k * fac) return _truncate(w, needs_trunc) */
reduction.h
// Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved. #ifndef __DACE_REDUCTION_H #define __DACE_REDUCTION_H #include <cstdint> #include "types.h" #include "math.h" // for ::min, ::max #ifdef __CUDACC__ #include "../../../external/cub/cub/device/device_segmented_reduce.cuh" #include "../../../external/cub/cub/device/device_reduce.cuh" #include "../../../external/cub/cub/block/block_reduce.cuh" #include "../../../external/cub/cub/iterator/counting_input_iterator.cuh" #include "../../../external/cub/cub/iterator/transform_input_iterator.cuh" #endif #ifdef __HIPCC__ // HIP supports the same set of atomic ops as CUDA SM 6.0+ #define DACE_USE_GPU_ATOMICS #define DACE_USE_GPU_DOUBLE_ATOMICS #elif defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300 #define DACE_USE_GPU_ATOMICS #if __CUDA_ARCH__ >= 600 #define DACE_USE_GPU_DOUBLE_ATOMICS #endif #endif // Specializations for reductions implemented in frameworks like OpenMP, MPI namespace dace { // Internal type. See below for wcr_fixed external type, which selects // the implementation according to T's properties. 
template <ReductionType REDTYPE, typename T> struct _wcr_fixed { static DACE_HDFI T reduce_atomic(T *ptr, const T& value); DACE_HDFI T operator()(const T &a, const T &b) const; }; // Custom reduction with a lambda function template <typename T> struct wcr_custom { template <typename WCR> static DACE_HDFI T reduce_atomic(WCR wcr, T *ptr, const T& value) { // The slowest kind of atomic operations (locked/compare-and-swap), // this should only happen in case of unrecognized lambdas T old; #ifdef DACE_USE_GPU_ATOMICS // Adapted from CUDA's pre-v8.0 double atomicAdd implementation T assumed; old = *ptr; do { assumed = old; old = atomicCAS(ptr, assumed, wcr(assumed, value)); } while (assumed != old); #else #pragma omp critical { old = *ptr; *ptr = wcr(old, value); } #endif return old; } // Non-conflicting version --> no critical section template <typename WCR> static DACE_HDFI T reduce(WCR wcr, T *ptr, const T& value) { T old = *ptr; *ptr = wcr(old, value); return old; } }; // Specialization of CAS for float and double template <> struct wcr_custom<float> { template <typename WCR> static DACE_HDFI float reduce_atomic(WCR wcr, float *ptr, const float& value) { // The slowest kind of atomic operations (locked/compare-and-swap), // this should only happen in case of unrecognized lambdas #ifdef DACE_USE_GPU_ATOMICS // Adapted from CUDA's pre-v8.0 double atomicAdd implementation int *iptr = (int *)ptr; int old = *iptr, assumed; do { assumed = old; old = atomicCAS(iptr, assumed, __float_as_int(wcr(__int_as_float(assumed), value))); } while (assumed != old); return __int_as_float(old); #else float old; #pragma omp critical { old = *ptr; *ptr = wcr(old, value); } return old; #endif } // Non-conflicting version --> no critical section template <typename WCR> static DACE_HDFI float reduce(WCR wcr, float *ptr, const float& value) { float old = *ptr; *ptr = wcr(old, value); return old; } }; template <> struct wcr_custom<double> { template <typename WCR> static DACE_HDFI double 
reduce_atomic(WCR wcr, double *ptr, const double& value) { // The slowest kind of atomic operations (locked/compare-and-swap), // this should only happen in case of unrecognized lambdas #ifdef DACE_USE_GPU_ATOMICS // Adapted from CUDA's pre-v8.0 double atomicAdd implementation unsigned long long *iptr = (unsigned long long *)ptr; unsigned long long old = *ptr, assumed; do { assumed = old; old = atomicCAS( iptr, assumed, __double_as_longlong( wcr(__longlong_as_double(assumed), value))); } while (assumed != old); return __longlong_as_double(old); #else double old; #pragma omp critical { old = *ptr; *ptr = wcr(old, value); } return old; #endif } // Non-conflicting version --> no critical section template <typename WCR> static DACE_HDFI double reduce(WCR wcr, double *ptr, const double& value) { double old; *ptr = wcr(old, value); return old; } }; // End of specialization template <typename T> struct _wcr_fixed<ReductionType::Sum, T> { static DACE_HDFI T reduce_atomic(T *ptr, const T& value) { #ifdef DACE_USE_GPU_ATOMICS return atomicAdd(ptr, value); #elif defined (_OPENMP) && _OPENMP >= 201107 T old; #pragma omp atomic capture { old = *ptr; *ptr += value; } return old; #else #pragma omp atomic *ptr += value; return T(0); // Unsupported #endif } DACE_HDFI T operator()(const T &a, const T &b) const { return a + b; } }; // Implementation of double atomicAdd for CUDA architectures prior to 6.0 #if defined(DACE_USE_GPU_ATOMICS) && !defined(DACE_USE_GPU_DOUBLE_ATOMICS) template <> struct _wcr_fixed<ReductionType::Sum, double> { static DACE_HDFI double reduce_atomic(double *ptr, const double& value) { unsigned long long int* address_as_ull = (unsigned long long int*)ptr; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(value + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } DACE_HDFI double operator()(const double &a, const double &b) const { 
return a + b; } }; #endif template <typename T> struct _wcr_fixed<ReductionType::Product, T> { static DACE_HDFI T reduce_atomic(T *ptr, const T& value) { #ifdef DACE_USE_GPU_ATOMICS return wcr_custom<T>::reduce( _wcr_fixed<ReductionType::Product, T>(), ptr, value); #elif defined (_OPENMP) && _OPENMP >= 201107 T old; #pragma omp atomic capture { old = *ptr; *ptr *= value; } return old; #else #pragma omp atomic *ptr *= value; return T(0); // Unsupported #endif } DACE_HDFI T operator()(const T &a, const T &b) const { return a * b; } }; template <typename T> struct _wcr_fixed<ReductionType::Min, T> { static DACE_HDFI T reduce_atomic(T *ptr, const T& value) { #ifdef DACE_USE_GPU_ATOMICS return atomicMin(ptr, value); #else return wcr_custom<T>::reduce_atomic( _wcr_fixed<ReductionType::Min, T>(), ptr, value); #endif } DACE_HDFI T operator()(const T &a, const T &b) const { return ::min(a, b); } }; template <typename T> struct _wcr_fixed<ReductionType::Max, T> { static DACE_HDFI T reduce_atomic(T *ptr, const T& value) { #ifdef DACE_USE_GPU_ATOMICS return atomicMax(ptr, value); #else return wcr_custom<T>::reduce_atomic( _wcr_fixed<ReductionType::Max, T>(), ptr, value); #endif } DACE_HDFI T operator()(const T &a, const T &b) const { return ::max(a, b); } }; // Specialization for floating point types template <> struct _wcr_fixed<ReductionType::Min, float> { static DACE_HDFI float reduce_atomic(float *ptr, const float& value) { return wcr_custom<float>::reduce_atomic( _wcr_fixed<ReductionType::Min, float>(), ptr, value); } DACE_HDFI float operator()(const float &a, const float &b) const { return ::min(a, b); } }; template <> struct _wcr_fixed<ReductionType::Max, float> { static DACE_HDFI float reduce_atomic(float *ptr, const float& value) { return wcr_custom<float>::reduce_atomic( _wcr_fixed<ReductionType::Max, float>(), ptr, value); } DACE_HDFI float operator()(const float &a, const float &b) const { return ::max(a, b); } }; template <> struct _wcr_fixed<ReductionType::Min, 
double> { static DACE_HDFI double reduce_atomic(double *ptr, const double& value) { return wcr_custom<double>::reduce_atomic( _wcr_fixed<ReductionType::Min, double>(), ptr, value); } DACE_HDFI double operator()(const double &a, const double &b) const { return ::min(a, b); } }; template <> struct _wcr_fixed<ReductionType::Max, double> { static DACE_HDFI double reduce_atomic(double *ptr, const double& value) { return wcr_custom<double>::reduce_atomic( _wcr_fixed<ReductionType::Max, double>(), ptr, value); } DACE_HDFI double operator()(const double &a, const double &b) const { return ::max(a, b); } }; // End of specialization template <typename T> struct _wcr_fixed<ReductionType::Logical_And, T> { static DACE_HDFI T reduce_atomic(T *ptr, const T& value) { #ifdef DACE_USE_GPU_ATOMICS return atomicAnd(ptr, value ? T(1) : T(0)); #elif defined (_OPENMP) && _OPENMP >= 201107 T old; T val = (value ? T(1) : T(0)); #pragma omp atomic capture { old = *ptr; *ptr &= val; } return old; #else T val = (value ? T(1) : T(0)); #pragma omp atomic *ptr &= val; return T(0); // Unsupported #endif } DACE_HDFI T operator()(const T &a, const T &b) const { return a && b; } }; template <typename T> struct _wcr_fixed<ReductionType::Bitwise_And, T> { static DACE_HDFI T reduce_atomic(T *ptr, const T& value) { #ifdef DACE_USE_GPU_ATOMICS return atomicAnd(ptr, value); #elif defined (_OPENMP) && _OPENMP >= 201107 T old; #pragma omp atomic capture { old = *ptr; *ptr &= value; } return old; #else #pragma omp atomic *ptr &= value; return T(0); // Unsupported #endif } DACE_HDFI T operator()(const T &a, const T &b) const { return a & b; } }; template <typename T> struct _wcr_fixed<ReductionType::Logical_Or, T> { static DACE_HDFI T reduce_atomic(T *ptr, const T& value) { #ifdef DACE_USE_GPU_ATOMICS return atomicOr(ptr, value ? T(1) : T(0)); #elif defined (_OPENMP) && _OPENMP >= 201107 T old; T val = (value ? 
T(1) : T(0)); #pragma omp atomic capture { old = *ptr; *ptr |= val; } return old; #else T val = (value ? T(1) : T(0)); #pragma omp atomic *ptr |= val; return T(0); // Unsupported #endif } DACE_HDFI T operator()(const T &a, const T &b) const { return a || b; } }; template <typename T> struct _wcr_fixed<ReductionType::Bitwise_Or, T> { static DACE_HDFI T reduce_atomic(T *ptr, const T& value) { #ifdef DACE_USE_GPU_ATOMICS return atomicOr(ptr, value); #elif defined (_OPENMP) && _OPENMP >= 201107 T old; #pragma omp atomic capture { old = *ptr; *ptr |= value; } return old; #else #pragma omp atomic *ptr |= value; return T(0); // Unsupported #endif } DACE_HDFI T operator()(const T &a, const T &b) const { return a | b; } }; template <typename T> struct _wcr_fixed<ReductionType::Logical_Xor, T> { static DACE_HDFI T reduce_atomic(T *ptr, const T& value) { #ifdef DACE_USE_GPU_ATOMICS return atomicXor(ptr, value ? T(1) : T(0)); #elif defined (_OPENMP) && _OPENMP >= 201107 T old; T val = (value ? T(1) : T(0)); #pragma omp atomic capture { old = *ptr; *ptr ^= val; } return old; #else T val = (value ? 
T(1) : T(0)); #pragma omp atomic *ptr ^= val; return T(0); // Unsupported #endif } DACE_HDFI T operator()(const T &a, const T &b) const { return a != b; } }; template <typename T> struct _wcr_fixed<ReductionType::Bitwise_Xor, T> { static DACE_HDFI T reduce_atomic(T *ptr, const T& value) { #ifdef DACE_USE_GPU_ATOMICS return atomicXor(ptr, value); #elif defined (_OPENMP) && _OPENMP >= 201107 T old; #pragma omp atomic capture { old = *ptr; *ptr ^= value; } return old; #else #pragma omp atomic *ptr ^= value; return T(0); // Unsupported #endif } DACE_HDFI T operator()(const T &a, const T &b) const { return a ^ b; } }; template <typename T> struct _wcr_fixed<ReductionType::Exchange, T> { static DACE_HDFI T reduce_atomic(T *ptr, const T& value) { #ifdef DACE_USE_GPU_ATOMICS return atomicExch(ptr, value); #else T old; #pragma omp critical { old = *ptr; *ptr = value; } return old; #endif } DACE_HDFI T operator()(const T &a, const T &b) const { return b; } }; ////////////////////////////////////////////////////////////////////////// // Specialization that regresses to critical section / locked update for // unsupported types template<typename T> using EnableIfScalar = typename std::enable_if<std::is_scalar<T>::value>::type; // Any vector type that is not of length 1, or struct/complex types // do not support atomics. In these cases, we regress to locked updates. 
template <ReductionType REDTYPE, typename T, typename SFINAE = void> struct wcr_fixed { static DACE_HDFI T reduce(T *ptr, const T& value) { T old = *ptr; *ptr = _wcr_fixed<REDTYPE, T>()(old, value); return old; } static DACE_HDFI T reduce_atomic(T *ptr, const T& value) { return wcr_custom<T>::template reduce_atomic( _wcr_fixed<REDTYPE, T>(), ptr, value); } }; // When atomics are supported, use _wcr_fixed normally template <ReductionType REDTYPE, typename T> struct wcr_fixed<REDTYPE, T, EnableIfScalar<T> > { static DACE_HDFI T reduce(T *ptr, const T& value) { T old = *ptr; *ptr = _wcr_fixed<REDTYPE, T>()(old, value); return old; } static DACE_HDFI T reduce_atomic(T *ptr, const T& value) { return _wcr_fixed<REDTYPE, T>::reduce_atomic(ptr, value); } DACE_HDFI T operator()(const T &a, const T &b) const { return _wcr_fixed<REDTYPE, T>()(a, b); } }; #ifdef __CUDACC__ struct StridedIteratorHelper { explicit StridedIteratorHelper(size_t stride) : stride(stride) {} size_t stride; __host__ __device__ __forceinline__ size_t operator()(const size_t &index) const { return index * stride; } }; inline auto stridedIterator(size_t stride) { cub::CountingInputIterator<int> counting_iterator(0); StridedIteratorHelper conversion_op(stride); cub::TransformInputIterator<int, decltype(conversion_op), decltype(counting_iterator)> itr(counting_iterator, conversion_op); return itr; } #endif } // namespace dace #endif // __DACE_REDUCTION_H
laplace2d.c
/* * Copyright 2017 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <math.h> #include <string.h> #include <openacc.h> #include "timer.h" #define NN 4096 #define NM 4096 double A[NN][NM]; double Anew[NN][NM]; int main(int argc, char** argv) { const int n = NN; const int m = NM; const int iter_max = 1000; const double tol = 1.0e-6; double error = 1.0; memset(A, 0, n * m * sizeof(double)); memset(Anew, 0, n * m * sizeof(double)); for (int j = 0; j < n; j++) { A[j][0] = 1.0; Anew[j][0] = 1.0; } printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m); StartTimer(); int iter = 0; while ( error > tol && iter < iter_max ) { error = 0.0; #pragma omp parallel for shared(m, n, Anew, A) #pragma acc kernels for( int j = 1; j < n-1; j++) { for( int i = 1; i < m-1; i++ ) { Anew[j][i] = 0.25 * ( A[j][i+1] + A[j][i-1] + A[j-1][i] + A[j+1][i]); error = fmax( error, fabs(Anew[j][i] - A[j][i])); } } #pragma omp parallel for shared(m, n, Anew, A) #pragma acc kernels for( int j = 1; j < n-1; j++) { for( int i = 1; i < m-1; i++ ) { A[j][i] = Anew[j][i]; } } if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error); iter++; } double runtime = GetTimer(); printf(" total: %f s\n", runtime / 1000); return 0; }
main.c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>

/*
 * Reads an N x N integer matrix A and a thread count t, then, using OpenMP:
 *   A.a  checks whether A is strictly diagonally dominant (SDD);
 *   A.b  finds the maximum absolute diagonal value of A;
 *   A.c  builds B[i][j] = max - |A[i][j]| (B[i][i] = max);
 *   A.d  finds the minimum of B and its indices.
 * Each phase prints its wall-clock time.
 */
int main(int argc, char **argv)
{
    int i, j;
    double start, end;          //check time
    double globalStart, globalEnd;
    int **A, **B;
    int N;                      //size for matrix A[N][N], B[N][N]
    int t;                      //the number of threads
    int chunk;                  //the size of lines each thread has to work with
    int sum;                    //per-row sum of off-diagonal absolute values
    int max;                    //max abs diagonal value of A matrix
    int localMin;               //per-thread minimum over its chunk of B
    int minLocalIndexI, minLocalIndexJ; //local indexes for min value of B matrix
    int min;                    //min value of B matrix
    int minIndexI, minIndexJ;   //indexes for min value of B matrix
    bool isSDD = true;          //true while A still looks strictly diagonally dominant

    printf("Give size of matrix: ");
    scanf("%d", &N);

    printf("Give number of Thread: ");
    scanf("%d", &t);

    //set number of threads
    omp_set_num_threads(t);

    /* BUGFIX: compute the chunk size once, before the parallel regions.
     * Previously every thread wrote the shared `chunk` concurrently (a
     * data race). Also guard against a zero chunk (t > N), which is an
     * invalid schedule() argument. */
    chunk = N / t;
    if (chunk < 1)
        chunk = 1;

    //allocate the matrices
    A = malloc(sizeof(int*)*N);
    B = malloc(sizeof(int*)*N);
    for(i = 0 ; i < N; i++){
        A[i] = malloc(sizeof(int)*N);
        B[i] = malloc(sizeof(int)*N);
    }

    //read matrix A
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            printf("Give the A[%d][%d]: ", i+1, j+1);
            scanf("%d", &A[i][j]);
        }
    }

    globalStart = omp_get_wtime();

    // A.a: strict diagonal dominance check, |A[i][i]| > sum of |A[i][j]|, j != i
    start = omp_get_wtime();
    #pragma omp parallel shared(A, N, chunk, isSDD) private(i, j, sum)
    {
        #pragma omp for schedule(dynamic, chunk)
        for(i = 0; i < N; i++)
        {
            //if a thread has found that the condition is not met,
            //skip the remaining iterations cheaply
            if(!isSDD) {
                continue;
            }

            //init sum for every line
            sum = 0;
            for(j = 0; j < N; j++) {
                if(i != j) {
                    sum += abs(A[i][j]);
                }
            }

            //check the condition for strict diagonal dominance
            if(abs(A[i][i]) <= sum) {
                //every racing writer stores the same value (false),
                //so no critical section is needed here
                isSDD = false;
            }
        }
    }
    end = omp_get_wtime();
    printf("\nA.a Threads %d, Time: %.4f\n", t, end - start);

    if(isSDD)
    {
        printf("\nThe A matrix is a strictly diagonally dominant\n");

        // A.b: maximum absolute diagonal value
        start = omp_get_wtime();
        /* BUGFIX: `max` was never initialized; OpenMP combines the thread
         * results into the variable's original (indeterminate) value.
         * abs() results are >= 0, so 0 is a safe identity here. */
        max = 0;
        #pragma omp parallel for private(i) reduction(max: max)
        for (i = 0; i < N; i++)
        {
            if (max < abs(A[i][i]))
                max = abs(A[i][i]);
        }
        end = omp_get_wtime();
        printf("\nA.b Threads %d, Time: %.4f\n", t, end - start);
        printf("\nThe max value is: %d\n", max);

        // A.c: create B based on the max value
        start = omp_get_wtime();
        #pragma omp parallel shared(A, B, N, chunk, max) private(i, j)
        {
            #pragma omp for schedule(static, chunk)
            for(i = 0; i < N; i++){
                for(j = 0; j < N; j++){
                    if(i != j){
                        B[i][j] = max - abs(A[i][j]);
                    }
                    else{
                        B[i][j] = max;
                    }
                }
            }
        }
        end = omp_get_wtime();
        printf("\nA.c Threads %d, Time: %.4f\n", t, end - start);

        /* BUGFIX: preview at most a 5x5 corner of B; the fixed 5x5 loop
         * read out of bounds when N < 5. */
        {
            int plim = (N < 5) ? N : 5;
            printf("\nArray B: \n");
            for(i = 0; i < plim; i++){
                for(j = 0; j < plim; j++){
                    printf(" %d", B[i][j]);
                }
                printf("\n");
            }
        }

        // A.d: global minimum of B and its indices
        start = omp_get_wtime();
        /* BUGFIX: initialize the shared result once, outside the parallel
         * region; previously every thread wrote min/minIndexI/minIndexJ
         * concurrently inside the region (a data race). */
        min = B[0][0];
        minIndexI = 0;
        minIndexJ = 0;
        #pragma omp parallel shared(B, N, chunk, min, minIndexI, minIndexJ) private(localMin, minLocalIndexI, minLocalIndexJ, i, j)
        {
            //per-thread starting candidate
            localMin = B[0][0];
            minLocalIndexI = 0;
            minLocalIndexJ = 0;

            #pragma omp for schedule(static, chunk)
            for(i = 0; i < N; i++)
            {
                for(j = 0; j < N; j++)
                {
                    if (B[i][j] < localMin){
                        localMin = B[i][j];
                        minLocalIndexI = i;
                        minLocalIndexJ = j;
                    }
                }
            }

            //merge each thread's candidate into the shared result
            #pragma omp critical (find_min)
            {
                if (localMin < min){
                    min = localMin;
                    minIndexI = minLocalIndexI;
                    minIndexJ = minLocalIndexJ;
                }
            }
        }
        end = omp_get_wtime();
        printf("\nA.d Threads %d, Time: %.4f\n", t, end - start);
        printf("\nThe min is B[%d][%d] = %d\n", minIndexI, minIndexJ, min);
    }
    else
    {
        //if the matrix is not a SDD then, the below message is printed and the programm ends
        printf("\nThe A matrix is not a strictly diagonally dominant\n");
    }

    globalEnd = omp_get_wtime();
    printf("\nAll program Threads %d, Time: %.4f\n", t, globalEnd - globalStart);

    //free memory
    for(i=0;i<N;i++)
    {
        free(A[i]);
        free(B[i]);
    }
    free(A);
    free(B);

    return 0;
}
coordinate_transformation_utilities.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: // // #ifndef KRATOS_COORDINATE_TRANSFORMATION_UTILITIES_H #define KRATOS_COORDINATE_TRANSFORMATION_UTILITIES_H // system includes // external includes #include "boost/numeric/ublas/matrix_proxy.hpp" // kratos includes #include "includes/define.h" #include "includes/node.h" #include "containers/variable.h" #include "geometries/geometry.h" namespace Kratos { ///@addtogroup KratosCore ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// A utility to rotate the local contributions of certain nodes to the system matrix, which is required to apply slip conditions in arbitrary directions. template<class TLocalMatrixType, class TLocalVectorType, class TValueType> class CoordinateTransformationUtils { public: ///@name Type Definitions ///@{ /// Pointer definition of CoordinateTransformationUtils KRATOS_CLASS_POINTER_DEFINITION(CoordinateTransformationUtils); typedef Node<3> NodeType; typedef Geometry< Node<3> > GeometryType; // typedef boost::numeric::ublas::matrix_row<TLocalMatrixType> LocalRowType; // // typedef boost::numeric::ublas::matrix_range<TLocalMatrixType> MatrixBlockType; ///@} ///@name Life Cycle ///@{ /// Constructor. /** @param DomainSize Number of space dimensions (2 or 3) * @param NumRowsPerNode Number of matrix or vector rows associated to each node. Velocity DOFs are assumed to be the first mDomainSize rows in each block of rows. * @param rSelectionFlag All nodes where the flag given by this argument is set to true will be transformed to a rotated coordinate system. 
*/ CoordinateTransformationUtils(const unsigned int DomainSize, const unsigned int NumRowsPerNode, const Kratos::Flags& rSelectionFlag = SLIP): mDomainSize(DomainSize), mBlockSize(NumRowsPerNode), mrFlag(rSelectionFlag) {} /// Destructor. virtual ~CoordinateTransformationUtils() {} ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Calculates rotation operator for given point * * This metod calculates rotation matrix for a given point. Nodal NORMAL variable should be * assigned properly since rotation is calculated based on it. * * @param rRotationMatrix Output rotation matrix * @param rThisPoint Current node */ virtual void CalculateRotationOperatorPure( TLocalMatrixType& rRotationMatrix, const GeometryType::PointType& rThisPoint) const { KRATOS_TRY if (mDomainSize == 2) { BoundedMatrix<double, 2, 2> local_matrix; this->LocalRotationOperatorPure(local_matrix, rThisPoint); if (rRotationMatrix.size1() != 2 || rRotationMatrix.size2() != 2) { rRotationMatrix.resize(2, 2, false); } noalias(rRotationMatrix) = local_matrix; } else if (mDomainSize == 3) { BoundedMatrix<double, 3, 3> local_matrix; this->LocalRotationOperatorPure(local_matrix, rThisPoint); if (rRotationMatrix.size1() != 3 || rRotationMatrix.size2() != 3) { rRotationMatrix.resize(3, 3, false); } noalias(rRotationMatrix) = local_matrix; } else { KRATOS_ERROR << "Unsupported domain size [ mDomainSize = " << mDomainSize << " ].\n"; } KRATOS_CATCH(""); } /** * @brief Calculates rotation nodal matrix shape sensitivities * * This method calculates shape sensitivities of rotation matrix for given node. * Nodal NORMAL(historical data container) and NORMAL_SHAPE_SENSITIVITY(non-historical data contaienr) variables * should be properly initialized. 
* * NORMAL_SHAPE_SENSITIVITY matrix should be properly sized and initialized with proper shape sensitivity values * rows: number_of_nodes contributing to NORMAL * DOMAIN_SIZE, columns: DOMAIN_SIZE * * @param rRotationMatrixShapeDerivative Output shape sensitivities matrix w.r.t. NodeIndex and DerivativeIndex * @param DerivativeNodeIndex NodeIndex for which shape sensitivity matrix is computed * @param DerivativeDirectionIndex Direction index of the node for which shape sensitivity matrix is computed * @param rThisPoint Current node where rotation matrix shape sensitivities are required */ virtual void CalculateRotationOperatorPureShapeSensitivities( TLocalMatrixType& rRotationMatrixShapeDerivative, const std::size_t DerivativeNodeIndex, const std::size_t DerivativeDirectionIndex, const GeometryType::PointType& rThisPoint) const { KRATOS_TRY if (mDomainSize == 2) { BoundedMatrix<double, 2, 2> local_matrix; this->CalculateRotationOperatorPureShapeSensitivities( local_matrix, DerivativeNodeIndex, DerivativeDirectionIndex, rThisPoint); if (rRotationMatrixShapeDerivative.size1() != 2 || rRotationMatrixShapeDerivative.size2() != 2) { rRotationMatrixShapeDerivative.resize(2, 2, false); } noalias(rRotationMatrixShapeDerivative) = local_matrix; } else if (mDomainSize == 3) { BoundedMatrix<double, 3, 3> local_matrix; this->CalculateRotationOperatorPureShapeSensitivities( local_matrix, DerivativeNodeIndex, DerivativeDirectionIndex, rThisPoint); if (rRotationMatrixShapeDerivative.size1() != 3 || rRotationMatrixShapeDerivative.size2() != 3) { rRotationMatrixShapeDerivative.resize(3, 3, false); } noalias(rRotationMatrixShapeDerivative) = local_matrix; } else { KRATOS_ERROR << "Unsupported domain size [ mDomainSize = " << mDomainSize << " ].\n"; } KRATOS_CATCH(""); } /** * @brief Calculate 2d rotation nodal matrix shape sensitivities * * This method calculates shape sensitivities of 2D rotation matrix for given node. 
* Nodal NORMAL(historical data container) and NORMAL_SHAPE_SENSITIVITY(non-historical data contaienr) variables * should be properly initialized. * * NORMAL_SHAPE_SENSITIVITY matrix should be properly sized and initialized with proper shape sensitivity values * rows: (number_of_neighbour_nodes + 1) * 2 * cols: 2 * * @param rOutput Output shape sensitivities matrix w.r.t. NodeIndex and DerivativeIndex * @param DerivativeNodeIndex NodeIndex for which shape sensitivity matrix is computed * @param DerivativeDirectionIndex Direction index of the node for which shape sensitivity matrix is computed * @param rThisPoint Current node where rotation matrix shape sensitivities are required */ virtual void CalculateRotationOperatorPureShapeSensitivities( BoundedMatrix<double, 2, 2>& rOutput, const std::size_t DerivativeNodeIndex, const std::size_t DerivativeDirectionIndex, const GeometryType::PointType& rThisPoint) const { KRATOS_TRY KRATOS_ERROR_IF(!rThisPoint.SolutionStepsDataHas(NORMAL)) << "NORMAL is not found in node at " << rThisPoint.Coordinates() << "."; KRATOS_ERROR_IF(!rThisPoint.Has(NORMAL_SHAPE_DERIVATIVE)) << "NORMAL_SHAPE_DERIVATIVE is not found in node at " << rThisPoint.Coordinates() << "."; const array_1d<double, 3>& r_nodal_normal = rThisPoint.FastGetSolutionStepValue(NORMAL); const double nodal_normal_magnitude = norm_2(r_nodal_normal); KRATOS_ERROR_IF(nodal_normal_magnitude == 0.0) << "NORMAL at node " << rThisPoint.Coordinates() << " is not properly initialized."; const Matrix& r_sensitivity_values = rThisPoint.GetValue(NORMAL_SHAPE_DERIVATIVE); KRATOS_DEBUG_ERROR_IF(r_sensitivity_values.size2() != 2) << "NORMAL_SHAPE_DERIVATIVE is not properly initialized at node " << rThisPoint.Coordinates() << " to calculate 2D rotation operator shape sensitivities. 
[ required number of columns = 2, available number of columns = " << r_sensitivity_values.size2() << " ]."; const std::size_t require_rows = (DerivativeNodeIndex + 1) * 2; KRATOS_DEBUG_ERROR_IF(r_sensitivity_values.size1() < require_rows) << "NORMAL_SHAPE_DERIVATIVE is not properly initialized at node " << rThisPoint.Coordinates() << " to calculate 2D rotation operator shape sensitivities. [ required number of rows >= " << require_rows << ", available number of rows = " << r_sensitivity_values.size1() << " ]."; const Vector& r_nodal_normal_derivatives = row(r_sensitivity_values, DerivativeNodeIndex * 2 + DerivativeDirectionIndex); rOutput(0, 0) = r_nodal_normal_derivatives[0] / nodal_normal_magnitude; rOutput(0, 1) = r_nodal_normal_derivatives[1] / nodal_normal_magnitude; rOutput(1, 0) = -r_nodal_normal_derivatives[1] / nodal_normal_magnitude; rOutput(1, 1) = r_nodal_normal_derivatives[0] / nodal_normal_magnitude; const double nodal_normal_magnitude_derivative = (r_nodal_normal[0] * r_nodal_normal_derivatives[0] + r_nodal_normal[1] * r_nodal_normal_derivatives[1]) / nodal_normal_magnitude; const double coeff = nodal_normal_magnitude_derivative / (std::pow(nodal_normal_magnitude, 2)); rOutput(0, 0) -= r_nodal_normal[0] * coeff; rOutput(0, 1) -= r_nodal_normal[1] * coeff; rOutput(1, 0) -= -r_nodal_normal[1] * coeff; rOutput(1, 1) -= r_nodal_normal[0] * coeff; KRATOS_CATCH(""); } /** * @brief Calculate 3d rotation nodal matrix shape sensitivities * * This method calculates shape sensitivities of 3D rotation matrix for given node. * Nodal NORMAL(historical data container) and NORMAL_SHAPE_SENSITIVITY(non-historical data contaienr) variables * should be properly initialized. * * NORMAL_SHAPE_SENSITIVITY matrix should be properly sized and initialized with proper shape sensitivity values * rows: (number_of_neighbour_nodes + 1) * 3 * cols: 3 * * @param rOutput Output shape sensitivities matrix w.r.t. 
NodeIndex and DerivativeIndex * @param DerivativeNodeIndex NodeIndex for which shape sensitivity matrix is computed * @param DerivativeDirectionIndex Direction index of the node for which shape sensitivity matrix is computed * @param rThisPoint Current node where rotation matrix shape sensitivities are required */ virtual void CalculateRotationOperatorPureShapeSensitivities( BoundedMatrix<double, 3, 3>& rOutput, const std::size_t DerivativeNodeIndex, const std::size_t DerivativeDirectionIndex, const GeometryType::PointType& rThisPoint) const { KRATOS_TRY KRATOS_ERROR_IF(!rThisPoint.SolutionStepsDataHas(NORMAL)) << "NORMAL is not found in node at " << rThisPoint.Coordinates() << "."; KRATOS_ERROR_IF(!rThisPoint.Has(NORMAL_SHAPE_DERIVATIVE)) << "NORMAL_SHAPE_DERIVATIVE is not found in node at " << rThisPoint.Coordinates() << "."; const array_1d<double, 3>& r_nodal_normal = rThisPoint.FastGetSolutionStepValue(NORMAL); const double nodal_normal_magnitude = norm_2(r_nodal_normal); KRATOS_ERROR_IF(nodal_normal_magnitude == 0.0) << "NORMAL at node " << rThisPoint.Coordinates() << " is not properly initialized."; const Matrix& r_sensitivity_values = rThisPoint.GetValue(NORMAL_SHAPE_DERIVATIVE); KRATOS_DEBUG_ERROR_IF(r_sensitivity_values.size2() != 3) << "NORMAL_SHAPE_DERIVATIVE is not properly initialized at node " << rThisPoint.Coordinates() << " to calculate 3D rotation operator shape sensitivities. [ required number of columns = 3, available number of columns = " << r_sensitivity_values.size2() << " ]."; const std::size_t require_rows = (DerivativeNodeIndex + 1) * 3; KRATOS_DEBUG_ERROR_IF(r_sensitivity_values.size1() < require_rows) << "NORMAL_SHAPE_DERIVATIVE is not properly initialized at node " << rThisPoint.Coordinates() << " to calculate 3D rotation operator shape sensitivities. 
[ required number of rows >= " << require_rows << ", available number of rows = " << r_sensitivity_values.size1() << " ]."; const Vector& r_nodal_normal_derivative = row(r_sensitivity_values, DerivativeNodeIndex * 3 + DerivativeDirectionIndex); const double nodal_normal_magnitude_derivative = VectorNormDerivative(nodal_normal_magnitude, r_nodal_normal, r_nodal_normal_derivative); const array_1d<double, 3>& unit_normal = r_nodal_normal / nodal_normal_magnitude; const array_1d<double, 3>& unit_normal_derivative = UnitVectorDerivative(nodal_normal_magnitude, nodal_normal_magnitude_derivative, r_nodal_normal, r_nodal_normal_derivative); rOutput(0, 0) = unit_normal_derivative[0]; rOutput(0, 1) = unit_normal_derivative[1]; rOutput(0, 2) = unit_normal_derivative[2]; array_1d<double, 3> rT1(3, 0.0); rT1[0] = 1.0; double dot = unit_normal[0]; double dot_derivative = unit_normal_derivative[0]; if (std::abs(dot) > 0.99) { rT1[0] = 0.0; rT1[1] = 1.0; dot = unit_normal[1]; dot_derivative = unit_normal_derivative[1]; } // calculate rT1 noalias(rT1) -= unit_normal * dot; const double rT1_norm = norm_2(rT1); const array_1d<double, 3>& unit_rT1 = rT1 / rT1_norm; // calculate rT1 derivative const array_1d<double, 3>& rT1_derivative = (unit_normal_derivative * dot + unit_normal * dot_derivative) * -1.0; // calculate rT1 norm derivative const double rT1_norm_derivative = VectorNormDerivative(rT1_norm, rT1, rT1_derivative); const array_1d<double, 3>& unit_rT1_derivative = UnitVectorDerivative(rT1_norm, rT1_norm_derivative, rT1, rT1_derivative); rOutput(1, 0) = unit_rT1_derivative[0]; rOutput(1, 1) = unit_rT1_derivative[1]; rOutput(1, 2) = unit_rT1_derivative[2]; rOutput(2, 0) = unit_normal_derivative[1] * unit_rT1[2] + unit_normal[1] * unit_rT1_derivative[2] - unit_normal_derivative[2] * unit_rT1[1] - unit_normal[2] * unit_rT1_derivative[1]; rOutput(2, 1) = unit_normal_derivative[2] * unit_rT1[0] + unit_normal[2] * unit_rT1_derivative[0] - unit_normal_derivative[0] * unit_rT1[2] - 
unit_normal[0] * unit_rT1_derivative[2];
    // Last entry of the rotation-operator derivative: product rule on (n x t1)
    // (tail of a shape-sensitivity computation whose header lies above this chunk)
    rOutput(2, 2) = unit_normal_derivative[0] * unit_rT1[1] +
                    unit_normal[0] * unit_rT1_derivative[1] -
                    unit_normal_derivative[1] * unit_rT1[0] -
                    unit_normal[1] * unit_rT1_derivative[0];

    KRATOS_CATCH("");
}

/// Rotate the local system contributions so that they are oriented with each node's normal.
/**
 @param rLocalMatrix Local system matrix
 @param rLocalVector Local RHS vector
 @param rGeometry A reference to the element's (or condition's) geometry
 */
virtual void Rotate(TLocalMatrixType& rLocalMatrix,
                    TLocalVectorType& rLocalVector,
                    GeometryType& rGeometry) const
{
    if(mBlockSize != mDomainSize) //Monolithic case
    {
        // Extra dof(s) per node (e.g. pressure): rotate only the velocity sub-block
        if(mDomainSize == 2) RotateAux<2,3>(rLocalMatrix,rLocalVector,rGeometry);
        if(mDomainSize == 3) RotateAux<3,4>(rLocalMatrix,rLocalVector,rGeometry);
    }
    else //fractional step case
    {
        // Blocks contain only velocity dofs
        if(mDomainSize == 2) RotateAuxPure<2>(rLocalMatrix,rLocalVector,rGeometry);
        if(mDomainSize == 3) RotateAuxPure<3>(rLocalMatrix,rLocalVector,rGeometry);
    }
}

/// RHS only version of Rotate
virtual void Rotate(TLocalVectorType& rLocalVector,
                    GeometryType& rGeometry) const
{
    //const unsigned int LocalSize = rLocalVector.size();
    // We expect this to work both with elements (4 nodes) and conditions (3 nodes)
    unsigned int Index = 0;

    if (rLocalVector.size() > 0)
    {
        if(mBlockSize != mDomainSize) //Monolithic case
        {
            for(unsigned int j = 0; j < rGeometry.PointsNumber(); ++j)
            {
                if( this->IsSlip(rGeometry[j]) )
                {
                    if(mDomainSize == 3)
                    {
                        array_1d<double,4> aux,aux1;
                        BoundedMatrix<double,4,4> rRot;
                        LocalRotationOperator3D<4>(rRot,rGeometry[j]);

                        // b_j <- R_j * b_j for this node's block
                        for(unsigned int k=0; k<4; k++)
                            aux[k] = rLocalVector[j*mBlockSize+k];

                        noalias(aux1) = prod(rRot,aux);

                        for(unsigned int k=0; k<4; k++)
                            rLocalVector[j*mBlockSize+k] = aux1[k];
                    }
                    else
                    {
                        array_1d<double,3> aux,aux1;
                        BoundedMatrix<double,3,3> rRot;
                        LocalRotationOperator2D<3>(rRot,rGeometry[j]);

                        for(unsigned int k=0; k<3; k++)
                        {
                            aux[k] = rLocalVector[j*mBlockSize+k];
                        }

                        noalias(aux1) = prod(rRot,aux);

                        for(unsigned int k=0; k<3; k++)
                            rLocalVector[j*mBlockSize+k] = aux1[k];
                    }
                }
                // NOTE(review): Index is accumulated but never read in this method
                Index += mBlockSize;
            }
        }
        else //fractional step case
        {
            for(unsigned int j = 0; j < rGeometry.PointsNumber(); ++j)
            {
                if( this->IsSlip(rGeometry[j]) )
                {
                    if(mDomainSize == 3)
                    {
                        array_1d<double,3> aux,aux1;
                        BoundedMatrix<double,3,3> rRot;
                        LocalRotationOperatorPure(rRot,rGeometry[j]);

                        for(unsigned int k=0; k<3; k++)
                            aux[k] = rLocalVector[j*mBlockSize+k];

                        noalias(aux1) = prod(rRot,aux);

                        for(unsigned int k=0; k<3; k++)
                            rLocalVector[j*mBlockSize+k] = aux1[k];
                    }
                    else
                    {
                        array_1d<double,2> aux,aux1;
                        BoundedMatrix<double,2,2> rRot;
                        LocalRotationOperatorPure(rRot,rGeometry[j]);

                        for(unsigned int k=0; k<2; k++)
                            aux[k] = rLocalVector[j*mBlockSize+k];

                        noalias(aux1) = prod(rRot,aux);

                        for(unsigned int k=0; k<2; k++)
                            rLocalVector[j*mBlockSize+k] = aux1[k];
                    }
                }
                Index += mBlockSize;
            }
        }
    }
}

/// Apply slip boundary conditions to the rotated local contributions.
/** This function takes the local system contributions rotated so each node's
    velocities are expressed using a base oriented with its normal and imposes
    that the normal velocity is equal to the mesh velocity in the normal direction.
*/
virtual void ApplySlipCondition(TLocalMatrixType& rLocalMatrix,
                                TLocalVectorType& rLocalVector,
                                GeometryType& rGeometry) const
{
    const unsigned int LocalSize = rLocalVector.size();
    // We expect this to work both with elements (4 nodes) and conditions (3 nodes)

    if (LocalSize > 0)
    {
        for(unsigned int itNode = 0; itNode < rGeometry.PointsNumber(); ++itNode)
        {
            if( this->IsSlip(rGeometry[itNode]))
            {
                // We fix the first dof (normal velocity) for each rotated block
                unsigned int j = itNode * mBlockSize;

                //const double k = rLocalMatrix(j,j)+rLocalMatrix(j,j+1)+rLocalMatrix(j,j+2);

                // If the mesh is moving, we must impose v_normal = vmesh_normal
                array_1d<double,3> VMesh = rGeometry[itNode].FastGetSolutionStepValue(MESH_VELOCITY);
                VMesh -= rGeometry[itNode].FastGetSolutionStepValue(VELOCITY);
                array_1d<double,3> rN = rGeometry[itNode].FastGetSolutionStepValue(NORMAL);
                this->Normalize(rN);

                // Zero out row j and column j of the system matrix, skipping term (j,j)
                for( unsigned int i = 0; i < j; ++i)// Skip term (i,i)
                {
                    rLocalMatrix(i,j) = 0.0;
                    rLocalMatrix(j,i) = 0.0;
                }
                for( unsigned int i = j+1; i < LocalSize; ++i)
                {
                    rLocalMatrix(i,j) = 0.0;
                    rLocalMatrix(j,i) = 0.0;
                }

                // Dirichlet row: 1 * v_n = (v_mesh - v) . n
                rLocalVector(j) = inner_prod(rN,VMesh);
                rLocalMatrix(j,j) = 1.0;
            }
        }
    }
}

/// RHS only version of ApplySlipCondition
virtual void ApplySlipCondition(TLocalVectorType& rLocalVector,
                                GeometryType& rGeometry) const
{
    if (rLocalVector.size() > 0)
    {
        for(unsigned int itNode = 0; itNode < rGeometry.PointsNumber(); ++itNode)
        {
            if( this->IsSlip(rGeometry[itNode]) )
            {
                // We fix the first dof (normal velocity) for each rotated block
                unsigned int j = itNode * mBlockSize;

                // If the mesh is moving, we must impose v_normal = vmesh_normal
                array_1d<double,3> VMesh = rGeometry[itNode].FastGetSolutionStepValue(MESH_VELOCITY);
                VMesh -= rGeometry[itNode].FastGetSolutionStepValue(VELOCITY);
                array_1d<double,3> rN = rGeometry[itNode].FastGetSolutionStepValue(NORMAL);
                this->Normalize(rN);

                rLocalVector[j] = inner_prod(rN,VMesh);
            }
        }
    }
}

/// Transform nodal velocities to the rotated coordinates (aligned with each node's normal)
virtual void RotateVelocities(ModelPart& rModelPart) const
{
    TLocalVectorType Vel(mDomainSize);
    TLocalVectorType Tmp(mDomainSize);

    ModelPart::NodeIterator it_begin = rModelPart.NodesBegin();
    // Each thread gets private scratch vectors; nodes are independent here
    #pragma omp parallel for firstprivate(Vel,Tmp)
    for(int iii=0; iii<static_cast<int>(rModelPart.Nodes().size()); iii++)
    {
        ModelPart::NodeIterator itNode = it_begin+iii;
        if( this->IsSlip(*itNode) )
        {
            //this->RotationOperator<TLocalMatrixType>(Rotation,);
            if(mDomainSize == 3)
            {
                BoundedMatrix<double,3,3> rRot;
                LocalRotationOperatorPure(rRot,*itNode);

                array_1d<double,3>& rVelocity = itNode->FastGetSolutionStepValue(VELOCITY);
                for(unsigned int i = 0; i < 3; i++) Vel[i] = rVelocity[i];
                noalias(Tmp) = prod(rRot,Vel);
                for(unsigned int i = 0; i < 3; i++) rVelocity[i] = Tmp[i];
            }
            else
            {
                BoundedMatrix<double,2,2> rRot;
                LocalRotationOperatorPure(rRot,*itNode);

                // Only the in-plane components are rotated; rVelocity[2] is untouched
                array_1d<double,3>& rVelocity = itNode->FastGetSolutionStepValue(VELOCITY);
                for(unsigned int i = 0; i < 2; i++) Vel[i] = rVelocity[i];
                noalias(Tmp) = prod(rRot,Vel);
                for(unsigned int i = 0; i < 2; i++) rVelocity[i] = Tmp[i];
            }
        }
    }
}

/// Transform nodal velocities from the rotated system to the original one
virtual void RecoverVelocities(ModelPart& rModelPart) const
{
    TLocalVectorType Vel(mDomainSize);
    TLocalVectorType Tmp(mDomainSize);

    ModelPart::NodeIterator it_begin = rModelPart.NodesBegin();
    #pragma omp parallel for firstprivate(Vel,Tmp)
    for(int iii=0; iii<static_cast<int>(rModelPart.Nodes().size()); iii++)
    {
        ModelPart::NodeIterator itNode = it_begin+iii;
        if( this->IsSlip(*itNode) )
        {
            if(mDomainSize == 3)
            {
                BoundedMatrix<double,3,3> rRot;
                LocalRotationOperatorPure(rRot,*itNode);

                // Inverse rotation: R is orthonormal, so R^-1 == R^T
                array_1d<double,3>& rVelocity = itNode->FastGetSolutionStepValue(VELOCITY);
                for(unsigned int i = 0; i < 3; i++) Vel[i] = rVelocity[i];
                noalias(Tmp) = prod(trans(rRot),Vel);
                for(unsigned int i = 0; i < 3; i++) rVelocity[i] = Tmp[i];
            }
            else
            {
                BoundedMatrix<double,2,2> rRot;
                LocalRotationOperatorPure(rRot,*itNode);

                array_1d<double,3>&
rVelocity = itNode->FastGetSolutionStepValue(VELOCITY); for(unsigned int i = 0; i < 2; i++) Vel[i] = rVelocity[i]; noalias(Tmp) = prod(trans(rRot),Vel); for(unsigned int i = 0; i < 2; i++) rVelocity[i] = Tmp[i]; } } } } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. virtual std::string Info() const { std::stringstream buffer; buffer << "CoordinateTransformationUtils"; return buffer.str(); } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const { rOStream << "CoordinateTransformationUtils"; } /// Print object's data. virtual void PrintData(std::ostream& rOStream) const {} ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ template<unsigned int TDim, unsigned int TBlockSize, unsigned int TSkip = 0> void RotateAux(TLocalMatrixType& rLocalMatrix, TLocalVectorType& rLocalVector, GeometryType& rGeometry) const { const unsigned int LocalSize = rLocalVector.size(); unsigned int Index = 0; int rotations_needed = 0; const unsigned int NumBlocks = LocalSize / TBlockSize; DenseVector<bool> NeedRotation( NumBlocks, false); std::vector< BoundedMatrix<double,TBlockSize,TBlockSize> > rRot(NumBlocks); for(unsigned int j = 0; j < NumBlocks; ++j) { if( this->IsSlip(rGeometry[j]) ) { NeedRotation[j] = true; rotations_needed++; if (TDim == 2) LocalRotationOperator2D<TBlockSize,TSkip>(rRot[j],rGeometry[j]); else LocalRotationOperator3D<TBlockSize,TSkip>(rRot[j],rGeometry[j]); } Index += TBlockSize; } if(rotations_needed > 0) { BoundedMatrix<double,TBlockSize,TBlockSize> mat_block, tmp; array_1d<double,TBlockSize> aux, aux1; for(unsigned int i=0; i<NumBlocks; i++) { if(NeedRotation[i] == true) { for(unsigned int j=0; j<NumBlocks; j++) { if(NeedRotation[j] == true) { 
ReadBlockMatrix<TBlockSize>(mat_block, rLocalMatrix, i*TBlockSize, j*TBlockSize); noalias(tmp) = prod(mat_block,trans(rRot[j])); noalias(mat_block) = prod(rRot[i],tmp); WriteBlockMatrix<TBlockSize>(mat_block, rLocalMatrix, i*TBlockSize, j*TBlockSize); } else { ReadBlockMatrix<TBlockSize>(mat_block, rLocalMatrix, i*TBlockSize, j*TBlockSize); noalias(tmp) = prod(rRot[i],mat_block); WriteBlockMatrix<TBlockSize>(tmp, rLocalMatrix, i*TBlockSize, j*TBlockSize); } } for(unsigned int k=0; k<TBlockSize; k++) aux[k] = rLocalVector[i*TBlockSize+k]; noalias(aux1) = prod(rRot[i],aux); for(unsigned int k=0; k<TBlockSize; k++) rLocalVector[i*TBlockSize+k] = aux1[k]; } else { for(unsigned int j=0; j<NumBlocks; j++) { if(NeedRotation[j] == true) { ReadBlockMatrix<TBlockSize>(mat_block, rLocalMatrix, i*TBlockSize, j*TBlockSize); noalias(tmp) = prod(mat_block,trans(rRot[j])); WriteBlockMatrix<TBlockSize>(tmp, rLocalMatrix, i*TBlockSize, j*TBlockSize); } } } } } } //to be used when there is only velocity (no additional pressure or other var block) template<unsigned int TDim> void RotateAuxPure(TLocalMatrixType& rLocalMatrix, TLocalVectorType& rLocalVector, GeometryType& rGeometry) const { const unsigned int LocalSize = rLocalVector.size(); unsigned int Index = 0; int rotations_needed = 0; const unsigned int NumBlocks = LocalSize / mBlockSize; DenseVector<bool> NeedRotation( NumBlocks, false); std::vector< BoundedMatrix<double,TDim,TDim> > rRot(NumBlocks); for(unsigned int j = 0; j < NumBlocks; ++j) { if( this->IsSlip(rGeometry[j]) ) { NeedRotation[j] = true; rotations_needed++; LocalRotationOperatorPure(rRot[j],rGeometry[j]); } Index += mBlockSize; } if(rotations_needed > 0) { BoundedMatrix<double,TDim,TDim> mat_block, tmp; array_1d<double,TDim> aux, aux1; for(unsigned int i=0; i<NumBlocks; i++) { if(NeedRotation[i] == true) { for(unsigned int j=0; j<NumBlocks; j++) { if(NeedRotation[j] == true) { ReadBlockMatrix<TDim>(mat_block, rLocalMatrix, i*mBlockSize, j*mBlockSize); noalias(tmp) 
= prod(mat_block,trans(rRot[j]));
                        // Full similarity transform on the block: R_i * A_ij * R_j^T
                        noalias(mat_block) = prod(rRot[i],tmp);
                        WriteBlockMatrix<TDim>(mat_block, rLocalMatrix, i*mBlockSize, j*mBlockSize);
                    }
                    else
                    {
                        // Only the row side rotates: R_i * A_ij
                        ReadBlockMatrix<TDim>(mat_block, rLocalMatrix, i*mBlockSize, j*mBlockSize);
                        noalias(tmp) = prod(rRot[i],mat_block);
                        WriteBlockMatrix<TDim>(tmp, rLocalMatrix, i*mBlockSize, j*mBlockSize);
                    }
                }

                // Rotate this node's RHS block: b_i <- R_i * b_i
                for(unsigned int k=0; k<TDim; k++) aux[k] = rLocalVector[i*mBlockSize+k];
                noalias(aux1) = prod(rRot[i],aux);
                for(unsigned int k=0; k<TDim; k++) rLocalVector[i*mBlockSize+k] = aux1[k];
            }
            else
            {
                // Row block i is not rotated, but columns of slip nodes still
                // pick up the transpose on the right: A_ij * R_j^T
                for(unsigned int j=0; j<NumBlocks; j++)
                {
                    if(NeedRotation[j] == true)
                    {
                        ReadBlockMatrix<TDim>(mat_block, rLocalMatrix, i*mBlockSize, j*mBlockSize);
                        noalias(tmp) = prod(mat_block,trans(rRot[j]));
                        WriteBlockMatrix<TDim>(tmp, rLocalMatrix, i*mBlockSize, j*mBlockSize);
                    }
                }
            }
        }
    }
}

/// Build the 2D nodal rotation operator (first base vector along the nodal NORMAL).
/** The rotation is written into the [TSkip,TSkip+1] diagonal sub-block of an
 *  identity matrix of size TBlockSize, so any extra dofs (e.g. pressure) are untouched.
 *  @param rRot The rotation matrix (output).
 *  @param rThisPoint The node whose NORMAL defines the new base.
 */
template<unsigned int TBlockSize, unsigned int TSkip = 0>
void LocalRotationOperator2D(BoundedMatrix<double,TBlockSize,TBlockSize>& rRot,
                             GeometryType::PointType& rThisPoint) const
{
    noalias(rRot) = IdentityMatrix(TBlockSize);

    // Get the normal evaluated at the node
    const array_1d<double,3>& rNormal = rThisPoint.FastGetSolutionStepValue(NORMAL);
    double aux = rNormal[0]*rNormal[0] + rNormal[1]*rNormal[1];
    aux = sqrt(aux);

    // Rows: (unit normal, tangent (-ny,nx)) -> right-handed base
    rRot(TSkip  ,TSkip  ) =  rNormal[0]/aux;
    rRot(TSkip  ,TSkip+1) =  rNormal[1]/aux;
    rRot(TSkip+1,TSkip  ) = -rNormal[1]/aux;
    rRot(TSkip+1,TSkip+1) =  rNormal[0]/aux;
}

/// Build the 3D nodal rotation operator (first base vector along the nodal NORMAL).
/** Same sub-block convention as LocalRotationOperator2D.
 *  @param rRot The rotation matrix (output).
 *  @param rThisPoint The node whose NORMAL defines the new base.
 */
template<unsigned int TBlockSize, unsigned int TSkip = 0>
void LocalRotationOperator3D(BoundedMatrix<double,TBlockSize,TBlockSize>& rRot,
                             GeometryType::PointType& rThisPoint) const
{
    noalias(rRot) = IdentityMatrix(TBlockSize);

    // Get the normal evaluated at the node
    const array_1d<double,3>& rNormal = rThisPoint.FastGetSolutionStepValue(NORMAL);
    double aux = rNormal[0]*rNormal[0] + rNormal[1]*rNormal[1] + rNormal[2]*rNormal[2];
    aux = sqrt(aux);
    rRot(TSkip,TSkip  ) = rNormal[0]/aux;
    rRot(TSkip,TSkip+1) = rNormal[1]/aux;
    rRot(TSkip,TSkip+2) = rNormal[2]/aux;

    // Define the new coordinate system, where the first vector is aligned with the normal
    // To choose the remaining two vectors, we project the first component of the
    // cartesian base to the tangent plane
    array_1d<double,3> rT1;
    rT1(0) = 1.0;
    rT1(1) = 0.0;
    rT1(2) = 0.0;
    double dot = rRot(TSkip,TSkip);//this->Dot(rN,rT1);

    // It is possible that the normal is aligned with (1,0,0), resulting in norm(rT1) = 0
    // If this is the case, repeat the procedure using (0,1,0)
    if ( fabs(dot) > 0.99 )
    {
        rT1(0) = 0.0;
        rT1(1) = 1.0;
        rT1(2) = 0.0;

        dot = rRot(TSkip,TSkip+1); //this->Dot(rN,rT1);
    }

    // calculate projection and normalize
    rT1[0] -= dot*rRot(TSkip,TSkip);
    rT1[1] -= dot*rRot(TSkip,TSkip+1);
    rT1[2] -= dot*rRot(TSkip,TSkip+2);
    this->Normalize(rT1);
    rRot(TSkip+1,TSkip  ) = rT1[0];
    rRot(TSkip+1,TSkip+1) = rT1[1];
    rRot(TSkip+1,TSkip+2) = rT1[2];

    // The third base component is chosen as N x T1, which is normalized by construction
    rRot(TSkip+2,TSkip  ) = rRot(TSkip,TSkip+1)*rT1[2] - rRot(TSkip,TSkip+2)*rT1[1];
    rRot(TSkip+2,TSkip+1) = rRot(TSkip,TSkip+2)*rT1[0] - rRot(TSkip,TSkip  )*rT1[2];
    rRot(TSkip+2,TSkip+2) = rRot(TSkip,TSkip  )*rT1[1] - rRot(TSkip,TSkip+1)*rT1[0];
}

/// 3x3 velocity-only rotation operator (no extra dof block), same construction as above.
void LocalRotationOperatorPure(BoundedMatrix<double,3,3>& rRot,
                               const GeometryType::PointType& rThisPoint) const
{
    // Get the normal evaluated at the node
    const array_1d<double,3>& rNormal = rThisPoint.FastGetSolutionStepValue(NORMAL);
    double aux = rNormal[0]*rNormal[0] + rNormal[1]*rNormal[1] + rNormal[2]*rNormal[2];
    aux = sqrt(aux);
    rRot(0,0) = rNormal[0]/aux;
    rRot(0,1) = rNormal[1]/aux;
    rRot(0,2) = rNormal[2]/aux;

    // Define the new coordinate system, where the first vector is aligned with the normal
    // To choose the remaining two vectors, we project the first component of the
    // cartesian base to the tangent plane
    array_1d<double,3> rT1;
    rT1(0) = 1.0;
    rT1(1) = 0.0;
    rT1(2) = 0.0;
    double dot = rRot(0,0);//this->Dot(rN,rT1);

    // It is possible that the normal is aligned with (1,0,0), resulting in norm(rT1) = 0
    // If this is the case, repeat the procedure using (0,1,0)
    if ( fabs(dot) > 0.99 )
    {
        rT1(0) = 0.0;
        rT1(1) = 1.0;
        rT1(2) = 0.0;

        dot = rRot(0,1); //this->Dot(rN,rT1);
    }

    // calculate projection and normalize
    rT1[0] -= dot*rRot(0,0);
    rT1[1] -= dot*rRot(0,1);
    rT1[2] -= dot*rRot(0,2);
    this->Normalize(rT1);
    rRot(1,0) = rT1[0];
    rRot(1,1) = rT1[1];
    rRot(1,2) = rT1[2];

    // The third base component is chosen as N x T1, which is normalized by construction
    rRot(2,0) = rRot(0,1)*rT1[2] - rRot(0,2)*rT1[1];
    rRot(2,1) = rRot(0,2)*rT1[0] - rRot(0,0)*rT1[2];
    rRot(2,2) = rRot(0,0)*rT1[1] - rRot(0,1)*rT1[0];
}

/// 2x2 velocity-only rotation operator: rows are (unit normal, right-handed tangent).
void LocalRotationOperatorPure(BoundedMatrix<double,2,2>& rRot,
                               const GeometryType::PointType& rThisPoint) const
{
    // Get the normal evaluated at the node
    const array_1d<double,3>& rNormal = rThisPoint.FastGetSolutionStepValue(NORMAL);
    double aux = rNormal[0]*rNormal[0] + rNormal[1]*rNormal[1];
    aux = sqrt(aux);

    rRot(0,0) =  rNormal[0]/aux;
    rRot(0,1) =  rNormal[1]/aux;
    rRot(1,0) = -rNormal[1]/aux;
    rRot(1,1) =  rNormal[0]/aux;
}

/// True if the node is flagged for slip treatment (carries the flag passed at construction).
bool IsSlip(const Node<3>& rNode) const
{
    return rNode.Is(mrFlag);
}

/// Normalize a vector.
/**
 * Normalize a vector in place.
 * @param rThis the vector
 * @return Original norm of the input vector
 * @note NOTE(review): there is no guard against a zero-length input; a null
 *       vector produces a division by zero (NaN/inf components). Callers are
 *       expected to pass non-degenerate normals — confirm at call sites.
 */
template< class TVectorType >
double Normalize(TVectorType& rThis) const
{
    double Norm = 0;
    for(typename TVectorType::iterator iComponent = rThis.begin(); iComponent < rThis.end(); ++iComponent)
        Norm += (*iComponent)*(*iComponent);
    Norm = sqrt(Norm);
    for(typename TVectorType::iterator iComponent = rThis.begin(); iComponent < rThis.end(); ++iComponent)
        *iComponent /= Norm;
    return Norm;
}

///@}
///@name Protected Access
///@{

/// Number of spatial dimensions (2 or 3), as passed to the constructor.
unsigned int GetDomainSize() const
{
    return mDomainSize;
}

/// Number of rows per node in the local system, as passed to the constructor.
unsigned int GetBlockSize() const
{
    return mBlockSize;
}

///@}
///@name Protected Inquiry
///@{

///@}
///@name Protected LifeCycle
///@{

///@}

private:

///@name Static Member Variables
///@{

///@}
///@name Member Variables
///@{

/// Number of spatial dimensions
const unsigned int mDomainSize;

/// Number of matrix or vector rows associated to each node.
/** @note Velocity Dofs are assumed to be the first mDomainSize rows. */
const unsigned int mBlockSize;

/// Nodal flag identifying slip nodes (see IsSlip); reference held, not copied.
const Kratos::Flags& mrFlag;

///@}
///@name Private Operators
///@{

///@}
///@name Private Operations
///@{

// /// Compute a rotation matrix to transform values from the cartesian base to one oriented with the node's normal
// /**
// * The normal is read from solution step data NORMAL. Use NormalCalculationUtils::CalculateOnSimplex to
// * obtain and store the nodal normal from the normals of the model's conditions.
// * @param rRot The rotation matrix (output)
// * @param rThisPoint The point used to orient the new coordinate system.
// * @see NormalCalculationUtils // */ // template<class TMatrixType> // void RotationOperator(TMatrixType& rRot, // GeometryType::PointType& rThisPoint) const // { // typedef boost::numeric::ublas::matrix_row<TMatrixType> ThisRowType; // // Get the normal evaluated at the node // const array_1d<double,3>& rNormal = rThisPoint.FastGetSolutionStepValue(NORMAL); // // if(mDomainSize == 3) // { // // Define the new coordinate system, where the first vector is aligned with the normal // ThisRowType rN(rRot,0); // for( unsigned int i = 0; i < 3; ++i) // rN[i] = rNormal[i]; // this->Normalize(rN); // // // To choose the remaining two vectors, we project the first component of the cartesian base to the tangent plane // ThisRowType rT1(rRot,1); // rT1(0) = 1.0; // rT1(1) = 0.0; // rT1(2) = 0.0; // // double dot = this->Dot(rN,rT1); // // // It is possible that the normal is aligned with (1,0,0), resulting in norm(rT1) = 0 // // If this is the case, repeat the procedure using (0,1,0) // if ( fabs(dot) > 0.99 ) // { // rT1(0) = 0.0; // rT1(1) = 1.0; // rT1(2) = 0.0; // // dot = this->Dot(rN,rT1); // } // // // calculate projection and normalize // rT1 -= dot * rN; // this->Normalize(rT1); // // // The third base component is choosen as N x T1, which is normalized by construction // ThisRowType rT2(rRot,2); // rT2(0) = rN(1)*rT1(2) - rN(2)*rT1(1); // rT2(1) = rN(2)*rT1(0) - rN(0)*rT1(2); // rT2(2) = rN(0)*rT1(1) - rN(1)*rT1(0); // } // else //if(mDomainSize == 2) // { // /* The basis for the new coordinate system is (normal,tangent) // Tangent vector is chosen (-normal_y, normal_x) so that the resulting base // is right-handed. 
// */ // ThisRowType rN(rRot,0); // ThisRowType rT(rRot,1); // // rN[0] = rNormal[0]; // rN[1] = rNormal[1]; // this->Normalize(rN); // rT[0] = -rN[1]; // rT[1] = rN[0]; // } // // } template< class TVectorType > double Dot(const TVectorType& rV1, const TVectorType& rV2) const { double dot = 0.0; for( typename TVectorType::const_iterator iV1 = rV1.begin(),iV2 = rV2.begin(); iV1 != rV1.end(); ++iV1, ++iV2) { dot += (*iV1) * (*iV2); } return dot; } inline double VectorNormDerivative( const double ValueNorm, const array_1d<double, 3>& rValue, const array_1d<double, 3>& rValueDerivative) const { return inner_prod(rValue, rValueDerivative) / ValueNorm; } inline array_1d<double, 3> UnitVectorDerivative( const double VectorNorm, const double VectorNormDerivative, const array_1d<double, 3>& rVector, const array_1d<double, 3>& rVectorDerivative) const { return (rVectorDerivative * VectorNorm - rVector * VectorNormDerivative) / std::pow(VectorNorm, 2); } /// Transform a local contribution from cartesian coordinates to rotated ones // void ApplyRotation(TLocalMatrixType& rMatrix, // const TLocalMatrixType& rRotation) const // { // // compute B = R*A*transpose(R) // const unsigned int LocalSize = rMatrix.size1(); // const unsigned int NumBlocks = LocalSize / mBlockSize; // //TLocalMatrixType Tmp = ZeroMatrix(LocalSize,LocalSize); // /* // for (unsigned int iBlock = 0; iBlock < NumBlocks; iBlock++) // { // for (unsigned int jBlock = 0; jBlock < NumBlocks; jBlock++) // { // for (unsigned int i = iBlock*mBlockSize; i < (iBlock+1)*mBlockSize; i++) // { // for(unsigned int j = jBlock*mBlockSize; j < (jBlock+1)*mBlockSize; j++) // { // double& tij = Tmp(i,j); // for(unsigned int k = iBlock*mBlockSize; k < (iBlock+1)*mBlockSize; k++) // { // for(unsigned int l = jBlock*mBlockSize; l < (jBlock+1)*mBlockSize; l++) // { // tij += rRotation(i,k)*rMatrix(k,l)*rRotation(j,l); // } // } // } // } // } // }*/ // // Matrix Tmp = prod(rMatrix,trans(rRotation)); // noalias(rMatrix) = 
prod(rRotation,Tmp); // // // noalias(rMatrix) = Tmp; // } //auxiliary functions template< unsigned int TBlockSize > void ReadBlockMatrix( BoundedMatrix<double,TBlockSize, TBlockSize>& block, const Matrix& origin, const unsigned int Ibegin, const unsigned int Jbegin) const { for(unsigned int i=0; i<TBlockSize; i++) { for(unsigned int j=0; j<TBlockSize; j++) { block(i,j) = origin(Ibegin+i, Jbegin+j); } } } template< unsigned int TBlockSize > void WriteBlockMatrix( const BoundedMatrix<double,TBlockSize, TBlockSize>& block, Matrix& destination, const unsigned int Ibegin, const unsigned int Jbegin) const { for(unsigned int i=0; i<TBlockSize; i++) { for(unsigned int j=0; j<TBlockSize; j++) { destination(Ibegin+i, Jbegin+j) = block(i,j); } } } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. CoordinateTransformationUtils& operator=(CoordinateTransformationUtils const& rOther) {} /// Copy constructor. CoordinateTransformationUtils(CoordinateTransformationUtils const& rOther) {} ///@} }; ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ /// input stream function template<class TLocalMatrixType, class TLocalVectorType, class TValueType> inline std::istream& operator >>(std::istream& rIStream, CoordinateTransformationUtils<TLocalMatrixType, TLocalVectorType, TValueType>& rThis) { return rIStream; } /// output stream function template<class TLocalMatrixType, class TLocalVectorType, class TValueType> inline std::ostream& operator <<(std::ostream& rOStream, const CoordinateTransformationUtils<TLocalMatrixType, TLocalVectorType, TValueType>& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} ///@} addtogroup block } #endif // KRATOS_COORDINATE_TRANSFORMATION_UTILITIES_H
// blaze/math/smp/openmp/DenseMatrix.h
//================================================================================================= /*! // \file blaze/math/smp/openmp/DenseMatrix.h // \brief Header file for the OpenMP-based dense matrix SMP implementation // // Copyright (C) 2013 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/ //================================================================================================= #ifndef _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_ #define _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_ //************************************************************************************************* // Includes //************************************************************************************************* #include <omp.h> #include <blaze/math/constraints/SMPAssignable.h> #include <blaze/math/DenseSubmatrix.h> #include <blaze/math/expressions/DenseMatrix.h> #include <blaze/math/expressions/SparseMatrix.h> #include <blaze/math/Functions.h> #include <blaze/math/intrinsics/IntrinsicTrait.h> #include <blaze/math/smp/ParallelSection.h> #include <blaze/math/smp/SerialSection.h> #include <blaze/math/SparseSubmatrix.h> #include <blaze/math/StorageOrder.h> #include <blaze/math/traits/SubmatrixExprTrait.h> #include <blaze/math/typetraits/IsDenseMatrix.h> #include <blaze/math/typetraits/IsSMPAssignable.h> #include <blaze/system/SMP.h> #include <blaze/util/Assert.h> #include <blaze/util/EnableIf.h> #include <blaze/util/logging/FunctionTrace.h> #include <blaze/util/mpl/And.h> #include <blaze/util/mpl/Not.h> #include <blaze/util/mpl/Or.h> #include <blaze/util/StaticAssert.h> #include <blaze/util/Types.h> #include <blaze/util/typetraits/IsSame.h> namespace blaze { //================================================================================================= // // PLAIN ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP assignment of a row-major dense matrix to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side row-major dense matrix to be assigned. 
// \return void // // This function is the backend implementation of the OpenMP-based SMP assignment of a row-major // dense matrix to a dense matrix.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO // Storage order of the left-hand side dense matrix , typename MT2 > // Type of the right-hand side dense matrix void smpAssign_backend( DenseMatrix<MT1,SO>& lhs, const DenseMatrix<MT2,rowMajor>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef typename MT1::ElementType ET1; typedef typename MT2::ElementType ET2; typedef IntrinsicTrait<typename MT1::ElementType> IT; typedef typename SubmatrixExprTrait<MT1,aligned>::Type AlignedTarget; typedef typename SubmatrixExprTrait<MT1,unaligned>::Type UnalignedTarget; const bool vectorizable( MT1::vectorizable && MT2::vectorizable && IsSame<ET1,ET2>::value ); const bool lhsAligned ( (~lhs).isAligned() ); const bool rhsAligned ( (~rhs).isAligned() ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).rows() % threads ) != 0UL )? 
1UL : 0UL ); const size_t equalShare ( (~lhs).rows() / threads + addon ); const size_t rest ( equalShare & ( IT::size - 1UL ) ); const size_t rowsPerThread( ( vectorizable && rest )?( equalShare - rest + IT::size ):( equalShare ) ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t row( i*rowsPerThread ); if( row >= (~lhs).rows() ) continue; const size_t m( min( rowsPerThread, (~lhs).rows() - row ) ); if( vectorizable && lhsAligned && rhsAligned ) { AlignedTarget target( submatrix<aligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); assign( target, submatrix<aligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } else if( vectorizable && lhsAligned ) { AlignedTarget target( submatrix<aligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); assign( target, submatrix<unaligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } else if( vectorizable && rhsAligned ) { UnalignedTarget target( submatrix<unaligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); assign( target, submatrix<aligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } else { UnalignedTarget target( submatrix<unaligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); assign( target, submatrix<unaligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP assignment of a column-major dense matrix to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side column-major dense matrix to be assigned. // \return void // // This function is the backend implementation of the OpenMP-based SMP assignment of a column-major // dense matrix to a dense matrix.\n // This function must \b NOT be called explicitly! 
It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO // Storage order of the left-hand side dense matrix , typename MT2 > // Type of the right-hand side dense matrix void smpAssign_backend( DenseMatrix<MT1,SO>& lhs, const DenseMatrix<MT2,columnMajor>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef typename MT1::ElementType ET1; typedef typename MT2::ElementType ET2; typedef IntrinsicTrait<typename MT1::ElementType> IT; typedef typename SubmatrixExprTrait<MT1,aligned>::Type AlignedTarget; typedef typename SubmatrixExprTrait<MT1,unaligned>::Type UnalignedTarget; const bool vectorizable( MT1::vectorizable && MT2::vectorizable && IsSame<ET1,ET2>::value ); const bool lhsAligned ( (~lhs).isAligned() ); const bool rhsAligned ( (~rhs).isAligned() ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).columns() % threads ) != 0UL )? 
1UL : 0UL ); const size_t equalShare ( (~lhs).columns() / threads + addon ); const size_t rest ( equalShare & ( IT::size - 1UL ) ); const size_t colsPerThread( ( vectorizable && rest )?( equalShare - rest + IT::size ):( equalShare ) ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t column( i*colsPerThread ); if( column >= (~lhs).columns() ) continue; const size_t n( min( colsPerThread, (~lhs).columns() - column ) ); if( vectorizable && lhsAligned && rhsAligned ) { AlignedTarget target( submatrix<aligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); assign( target, submatrix<aligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } else if( vectorizable && lhsAligned ) { AlignedTarget target( submatrix<aligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); assign( target, submatrix<unaligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } else if( vectorizable && rhsAligned ) { UnalignedTarget target( submatrix<unaligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); assign( target, submatrix<aligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } else { UnalignedTarget target( submatrix<unaligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); assign( target, submatrix<unaligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP assignment of a row-major sparse matrix to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side row-major sparse matrix to be assigned. // \return void // // This function is the backend implementation of the OpenMP-based SMP assignment of a row-major // sparse matrix to a dense matrix.\n // This function must \b NOT be called explicitly! 
It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1    // Type of the left-hand side dense matrix
        , bool SO         // Storage order of the left-hand side dense matrix
        , typename MT2 >  // Type of the right-hand side sparse matrix
void smpAssign_backend( DenseMatrix<MT1,SO>& lhs, const SparseMatrix<MT2,rowMajor>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   // NOTE(review): ET1/ET2 are declared but unused in this sparse overload
   // (no vectorization check is needed for sparse sources).
   typedef typename MT1::ElementType  ET1;
   typedef typename MT2::ElementType  ET2;
   typedef typename SubmatrixExprTrait<MT1,unaligned>::Type  UnalignedTarget;

   // Split the rows evenly across the threads of the enclosing parallel region;
   // 'addon' rounds up so all rows are covered.
   const int    threads      ( omp_get_num_threads() );
   const size_t addon        ( ( ( (~lhs).rows() % threads ) != 0UL )? 1UL : 0UL );
   const size_t rowsPerThread( (~lhs).rows() / threads + addon );

#pragma omp for schedule(dynamic,1) nowait
   for( int i=0UL; i<threads; ++i )
   {
      const size_t row( i*rowsPerThread );

      // Rounding up may leave trailing chunks empty
      if( row >= (~lhs).rows() )
         continue;

      const size_t m( min( rowsPerThread, (~lhs).rows() - row ) );

      // Sparse source: always assign through an unaligned submatrix view
      UnalignedTarget target( submatrix<unaligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) );
      assign( target, submatrix<unaligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) );
   }
}
/*! \endcond */
//*************************************************************************************************


//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP assignment of a column-major sparse matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side column-major sparse matrix to be assigned.
// \return void // // This function is the backend implementation of the OpenMP-based SMP assignment of a column-major // sparse matrix to a dense matrix.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO // Storage order of the left-hand side dense matrix , typename MT2 > // Type of the right-hand side sparse matrix void smpAssign_backend( DenseMatrix<MT1,SO>& lhs, const SparseMatrix<MT2,columnMajor>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef typename MT1::ElementType ET1; typedef typename MT2::ElementType ET2; typedef typename SubmatrixExprTrait<MT1,unaligned>::Type UnalignedTarget; const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).columns() % threads ) != 0UL )? 1UL : 0UL ); const size_t colsPerThread( (~lhs).columns() / threads + addon ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t column( i*colsPerThread ); if( column >= (~lhs).columns() ) continue; const size_t n( min( colsPerThread, (~lhs).columns() - column ) ); UnalignedTarget target( submatrix<unaligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); assign( target, submatrix<unaligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP assignment to a dense matrix. 
// \ingroup smp // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be assigned. // \return void // // This function implements the default OpenMP-based SMP assignment to a dense matrix. Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline typename EnableIf< And< IsDenseMatrix<MT1> , Or< Not< IsSMPAssignable<MT1> > , Not< IsSMPAssignable<MT2> > > > >::Type smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); assign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP assignment to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side column-major sparse matrix to be assigned. // \return void // // This function implements the OpenMP-based SMP assignment to a dense matrix. 
Due to the
// explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1  // Type of the left-hand side dense matrix
        , bool SO1      // Storage order of the left-hand side dense matrix
        , typename MT2  // Type of the right-hand side matrix
        , bool SO2 >    // Storage order of the right-hand side matrix
inline typename EnableIf< And< IsDenseMatrix<MT1>
                             , IsSMPAssignable<MT1>
                             , IsSMPAssignable<MT2> > >::Type
   smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( typename MT1::ElementType );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( typename MT2::ElementType );

   BLAZE_INTERNAL_ASSERT( (~lhs).rows()    == (~rhs).rows()   , "Invalid number of rows"    );
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );

   BLAZE_PARALLEL_SECTION
   {
      // Fall back to a serial assignment if a serial section is active or the right-hand
      // side operand opts out of an SMP assignment (e.g. because it is too small)
      if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
         assign( ~lhs, ~rhs );
      }
      else {
         // Spawn the OpenMP team; the backend distributes the work via '#pragma omp for'
#pragma omp parallel shared( lhs, rhs )
         smpAssign_backend( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************




//=================================================================================================
//
//  ADDITION ASSIGNMENT
//
//=================================================================================================

//*************************************************************************************************
/*!
\cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP addition assignment of a row-major dense matrix // to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side row-major dense matrix to be added. // \return void // // This function is the backend implementation of the OpenMP-based SMP addition assignment of a // row-major dense matrix to a dense matrix.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO // Storage order of the left-hand side dense matrix , typename MT2 > // Type of the right-hand side dense matrix void smpAddAssign_backend( DenseMatrix<MT1,SO>& lhs, const DenseMatrix<MT2,rowMajor>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef typename MT1::ElementType ET1; typedef typename MT2::ElementType ET2; typedef IntrinsicTrait<typename MT1::ElementType> IT; typedef typename SubmatrixExprTrait<MT1,aligned>::Type AlignedTarget; typedef typename SubmatrixExprTrait<MT1,unaligned>::Type UnalignedTarget; const bool vectorizable( MT1::vectorizable && MT2::vectorizable && IsSame<ET1,ET2>::value ); const bool lhsAligned ( (~lhs).isAligned() ); const bool rhsAligned ( (~rhs).isAligned() ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).rows() % threads ) != 0UL )? 
1UL : 0UL ); const size_t equalShare ( (~lhs).rows() / threads + addon ); const size_t rest ( equalShare & ( IT::size - 1UL ) ); const size_t rowsPerThread( ( vectorizable && rest )?( equalShare - rest + IT::size ):( equalShare ) ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t row( i*rowsPerThread ); if( row >= (~lhs).rows() ) continue; const size_t m( min( rowsPerThread, (~lhs).rows() - row ) ); if( vectorizable && lhsAligned && rhsAligned ) { AlignedTarget target( submatrix<aligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); addAssign( target, submatrix<aligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } else if( vectorizable && lhsAligned ) { AlignedTarget target( submatrix<aligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); addAssign( target, submatrix<unaligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } else if( vectorizable && rhsAligned ) { UnalignedTarget target( submatrix<unaligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); addAssign( target, submatrix<aligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } else { UnalignedTarget target( submatrix<unaligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); addAssign( target, submatrix<unaligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP addition assignment of a column-major dense matrix // to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side column-major dense matrix to be added. // \return void // // This function is the backend implementation of the OpenMP-based SMP addition assignment of a // column-major dense matrix to a dense matrix.\n // This function must \b NOT be called explicitly! 
It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO // Storage order of the left-hand side dense matrix , typename MT2 > // Type of the right-hand side dense matrix void smpAddAssign_backend( DenseMatrix<MT1,SO>& lhs, const DenseMatrix<MT2,columnMajor>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef typename MT1::ElementType ET1; typedef typename MT2::ElementType ET2; typedef IntrinsicTrait<typename MT1::ElementType> IT; typedef typename SubmatrixExprTrait<MT1,aligned>::Type AlignedTarget; typedef typename SubmatrixExprTrait<MT1,unaligned>::Type UnalignedTarget; const bool vectorizable( MT1::vectorizable && MT2::vectorizable && IsSame<ET1,ET2>::value ); const bool lhsAligned ( (~lhs).isAligned() ); const bool rhsAligned ( (~rhs).isAligned() ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).columns() % threads ) != 0UL )? 
1UL : 0UL ); const size_t equalShare ( (~lhs).columns() / threads + addon ); const size_t rest ( equalShare & ( IT::size - 1UL ) ); const size_t colsPerThread( ( vectorizable && rest )?( equalShare - rest + IT::size ):( equalShare ) ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t column( i*colsPerThread ); if( column >= (~lhs).columns() ) continue; const size_t n( min( colsPerThread, (~lhs).columns() - column ) ); if( vectorizable && lhsAligned && rhsAligned ) { AlignedTarget target( submatrix<aligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); addAssign( target, submatrix<aligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } else if( vectorizable && lhsAligned ) { AlignedTarget target( submatrix<aligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); addAssign( target, submatrix<unaligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } else if( vectorizable && rhsAligned ) { UnalignedTarget target( submatrix<unaligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); addAssign( target, submatrix<aligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } else { UnalignedTarget target( submatrix<unaligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); addAssign( target, submatrix<unaligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP addition assignment of a row-major sparse matrix // to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side row-major sparse matrix to be added. // \return void // // This function is the backend implementation of the OpenMP-based SMP addition assignment of a // row-major sparse matrix to a dense matrix.\n // This function must \b NOT be called explicitly! 
It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO // Storage order of the left-hand side dense matrix , typename MT2 > // Type of the right-hand side sparse matrix void smpAddAssign_backend( DenseMatrix<MT1,SO>& lhs, const SparseMatrix<MT2,rowMajor>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef typename MT1::ElementType ET1; typedef typename MT2::ElementType ET2; typedef typename SubmatrixExprTrait<MT1,unaligned>::Type UnalignedTarget; const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).rows() % threads ) != 0UL )? 1UL : 0UL ); const size_t rowsPerThread( (~lhs).rows() / threads + addon ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t row( i*rowsPerThread ); if( row >= (~lhs).rows() ) continue; const size_t m( min( rowsPerThread, (~lhs).rows() - row ) ); UnalignedTarget target( submatrix<unaligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); addAssign( target, submatrix<unaligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP addition assignment of a column-major sparse matrix // to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side column-major sparse matrix to be added. 
// \return void // // This function is the backend implementation of the OpenMP-based SMP addition assignment of a // column-major sparse matrix to a dense matrix.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO // Storage order of the left-hand side dense matrix , typename MT2 > // Type of the right-hand side sparse matrix void smpAddAssign_backend( DenseMatrix<MT1,SO>& lhs, const SparseMatrix<MT2,columnMajor>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef typename MT1::ElementType ET1; typedef typename MT2::ElementType ET2; typedef typename SubmatrixExprTrait<MT1,unaligned>::Type UnalignedTarget; const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).columns() % threads ) != 0UL )? 1UL : 0UL ); const size_t colsPerThread( (~lhs).columns() / threads + addon ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t column( i*colsPerThread ); if( column >= (~lhs).columns() ) continue; const size_t n( min( colsPerThread, (~lhs).columns() - column ) ); UnalignedTarget target( submatrix<unaligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); addAssign( target, submatrix<unaligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense matrix. 
// \ingroup smp // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be added. // \return void // // This function implements the default OpenMP-based SMP addition assignment to a dense matrix. // Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline typename EnableIf< And< IsDenseMatrix<MT1> , Or< Not< IsSMPAssignable<MT1> > , Not< IsSMPAssignable<MT2> > > > >::Type smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); addAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side row-major dense matrix to be added. 
// \return void // // This function implements the OpenMP-based SMP addition assignment to a dense matrix. Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline typename EnableIf< And< IsDenseMatrix<MT1> , IsSMPAssignable<MT1> , IsSMPAssignable<MT2> > >::Type smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( typename MT1::ElementType ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( typename MT2::ElementType ); BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { addAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) smpAddAssign_backend( ~lhs, ~rhs ); } } } /*! 
\endcond */ //************************************************************************************************* //================================================================================================= // // SUBTRACTION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP subtraction assignment of a row-major dense matrix // to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side row-major dense matrix to be subtracted. // \return void // // This function is the backend implementation of the OpenMP-based SMP subtraction assignment // of a row-major dense matrix to a dense matrix.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. 
*/ template< typename MT1 // Type of the left-hand side dense matrix , bool SO // Storage order of the left-hand side dense matrix , typename MT2 > // Type of the right-hand side dense matrix void smpSubAssign_backend( DenseMatrix<MT1,SO>& lhs, const DenseMatrix<MT2,rowMajor>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef typename MT1::ElementType ET1; typedef typename MT2::ElementType ET2; typedef IntrinsicTrait<typename MT1::ElementType> IT; typedef typename SubmatrixExprTrait<MT1,aligned>::Type AlignedTarget; typedef typename SubmatrixExprTrait<MT1,unaligned>::Type UnalignedTarget; const bool vectorizable( MT1::vectorizable && MT2::vectorizable && IsSame<ET1,ET2>::value ); const bool lhsAligned ( (~lhs).isAligned() ); const bool rhsAligned ( (~rhs).isAligned() ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).rows() % threads ) != 0UL )? 1UL : 0UL ); const size_t equalShare ( (~lhs).rows() / threads + addon ); const size_t rest ( equalShare & ( IT::size - 1UL ) ); const size_t rowsPerThread( ( vectorizable && rest )?( equalShare - rest + IT::size ):( equalShare ) ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t row( i*rowsPerThread ); if( row >= (~lhs).rows() ) continue; const size_t m( min( rowsPerThread, (~lhs).rows() - row ) ); if( vectorizable && lhsAligned && rhsAligned ) { AlignedTarget target( submatrix<aligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); subAssign( target, submatrix<aligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } else if( vectorizable && lhsAligned ) { AlignedTarget target( submatrix<aligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); subAssign( target, submatrix<unaligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } else if( vectorizable && rhsAligned ) { UnalignedTarget target( submatrix<unaligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); subAssign( target, 
submatrix<aligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } else { UnalignedTarget target( submatrix<unaligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); subAssign( target, submatrix<unaligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP subtraction assignment of a column-major dense matrix // to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side column-major dense matrix to be subtracted. // \return void // // This function is the backend implementation of the OpenMP-based SMP subtraction assignment // of a column-major dense matrix to a dense matrix.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. 
*/ template< typename MT1 // Type of the left-hand side dense matrix , bool SO // Storage order of the left-hand side dense matrix , typename MT2 > // Type of the right-hand side dense matrix void smpSubAssign_backend( DenseMatrix<MT1,SO>& lhs, const DenseMatrix<MT2,columnMajor>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef typename MT1::ElementType ET1; typedef typename MT2::ElementType ET2; typedef IntrinsicTrait<typename MT1::ElementType> IT; typedef typename SubmatrixExprTrait<MT1,aligned>::Type AlignedTarget; typedef typename SubmatrixExprTrait<MT1,unaligned>::Type UnalignedTarget; const bool vectorizable( MT1::vectorizable && MT2::vectorizable && IsSame<ET1,ET2>::value ); const bool lhsAligned ( (~lhs).isAligned() ); const bool rhsAligned ( (~rhs).isAligned() ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).columns() % threads ) != 0UL )? 1UL : 0UL ); const size_t equalShare ( (~lhs).columns() / threads + addon ); const size_t rest ( equalShare & ( IT::size - 1UL ) ); const size_t colsPerThread( ( vectorizable && rest )?( equalShare - rest + IT::size ):( equalShare ) ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t column( i*colsPerThread ); if( column >= (~lhs).columns() ) continue; const size_t n( min( colsPerThread, (~lhs).columns() - column ) ); if( vectorizable && lhsAligned && rhsAligned ) { AlignedTarget target( submatrix<aligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); subAssign( target, submatrix<aligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } else if( vectorizable && lhsAligned ) { AlignedTarget target( submatrix<aligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); subAssign( target, submatrix<unaligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } else if( vectorizable && rhsAligned ) { UnalignedTarget target( submatrix<unaligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); subAssign( 
target, submatrix<aligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } else { UnalignedTarget target( submatrix<unaligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); subAssign( target, submatrix<unaligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP subtraction assignment of a row-major sparse matrix // to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side row-major sparse matrix to be subtracted. // \return void // // This function is the backend implementation of the OpenMP-based SMP subtraction assignment // of a row-major sparse matrix to a dense matrix.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO // Storage order of the left-hand side dense matrix , typename MT2 > // Type of the right-hand side sparse matrix void smpSubAssign_backend( DenseMatrix<MT1,SO>& lhs, const SparseMatrix<MT2,rowMajor>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef typename MT1::ElementType ET1; typedef typename MT2::ElementType ET2; typedef typename SubmatrixExprTrait<MT1,unaligned>::Type UnalignedTarget; const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).rows() % threads ) != 0UL )? 
1UL : 0UL ); const size_t rowsPerThread( (~lhs).rows() / threads + addon ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t row( i*rowsPerThread ); if( row >= (~lhs).rows() ) continue; const size_t m( min( rowsPerThread, (~lhs).rows() - row ) ); UnalignedTarget target( submatrix<unaligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); subAssign( target, submatrix<unaligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP subtraction assignment of a column-major sparse matrix // to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side column-major sparse matrix to be subtracted. // \return void // // This function is the backend implementation of the OpenMP-based SMP subtraction assignment // of a column-major sparse matrix to a dense matrix.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. 
*/ template< typename MT1 // Type of the left-hand side dense matrix , bool SO // Storage order of the left-hand side dense matrix , typename MT2 > // Type of the right-hand side sparse matrix void smpSubAssign_backend( DenseMatrix<MT1,SO>& lhs, const SparseMatrix<MT2,columnMajor>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef typename MT1::ElementType ET1; typedef typename MT2::ElementType ET2; typedef typename SubmatrixExprTrait<MT1,unaligned>::Type UnalignedTarget; const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).columns() % threads ) != 0UL )? 1UL : 0UL ); const size_t colsPerThread( (~lhs).columns() / threads + addon ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t column( i*colsPerThread ); if( column >= (~lhs).columns() ) continue; const size_t n( min( colsPerThread, (~lhs).columns() - column ) ); UnalignedTarget target( submatrix<unaligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); subAssign( target, submatrix<unaligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP subtracction assignment to a dense matrix. // \ingroup smp // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be subtracted. // \return void // // This function implements the default OpenMP-based SMP subtraction assignment to a dense matrix. 
// Due to the explicit application of the SFINAE principle, this function can only be selected by // the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline typename EnableIf< And< IsDenseMatrix<MT1> , Or< Not< IsSMPAssignable<MT1> > , Not< IsSMPAssignable<MT2> > > > >::Type smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); subAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP subtracction assignment to a dense matrix. // \ingroup smp // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be subtracted. // \return void // // This function implements the default OpenMP-based SMP subtraction assignment of a matrix to a // dense matrix. 
// Due to the explicit application of the SFINAE principle, this function can only
// be selected by the compiler in case both operands are SMP-assignable and the element types of
// both operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1  // Type of the left-hand side dense matrix
        , bool SO1      // Storage order of the left-hand side dense matrix
        , typename MT2  // Type of the right-hand side matrix
        , bool SO2 >    // Storage order of the right-hand side matrix
inline typename EnableIf< And< IsDenseMatrix<MT1>
                             , IsSMPAssignable<MT1>
                             , IsSMPAssignable<MT2> > >::Type
   smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // The element types themselves must not be SMP-assignable (no nested parallelism)
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( typename MT1::ElementType );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( typename MT2::ElementType );

   BLAZE_INTERNAL_ASSERT( (~lhs).rows()    == (~rhs).rows()   , "Invalid number of rows"    );
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );

   BLAZE_PARALLEL_SECTION
   {
      // Fall back to the serial kernel if a serial section is active or the right-hand side
      // expression cannot be evaluated in parallel
      if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
         subAssign( ~lhs, ~rhs );
      }
      else {
         // Spawn the OpenMP team; the backend distributes the work via '#pragma omp for'
#pragma omp parallel shared( lhs, rhs )
         smpSubAssign_backend( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************




//=================================================================================================
//
//  MULTIPLICATION ASSIGNMENT
//
//=================================================================================================

//*************************************************************************************************
/*!
\cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense
//        matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be multiplied.
// \return void
//
// This function implements the default OpenMP-based SMP multiplication assignment to a dense
// matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1  // Type of the left-hand side dense matrix
        , bool SO1      // Storage order of the left-hand side matrix
        , typename MT2  // Type of the right-hand side matrix
        , bool SO2 >    // Storage order of the right-hand side matrix
inline typename EnableIf< IsDenseMatrix<MT1> >::Type
   smpMultAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (~lhs).rows()    == (~rhs).rows()   , "Invalid number of rows"    );
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );

   // Note: multiplication assignment is not parallelized here; the operation is forwarded
   // to the serial kernel unconditionally
   multAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************




//=================================================================================================
//
//  COMPILE TIME CONSTRAINT
//
//=================================================================================================

//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
namespace {

// Guard: this translation unit must only be compiled when OpenMP parallelization is enabled
BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE );

}
/*! \endcond */
//*************************************************************************************************

} // namespace blaze

#endif
edgebased_levelset_substep.h
// Kratos Multi-Physics
//
// Copyright (c) 2015, Pooyan Dadvand, Riccardo Rossi, CIMNE (International Center for Numerical Methods in Engineering)
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
//
// - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
// - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the distribution.
// - All advertising materials mentioning features or use of this software must display the following acknowledgement:
// This product includes Kratos Multi-Physics technology.
// - Neither the name of the CIMNE nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDERS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// // Project Name: Kratos // Last Modified by: $Author: antonia $ // Date: $Date: 2009-01-14 16:24:38 $ // Revision: $Revision: 1.11 $ // // #if !defined(KRATOS_EDGEBASED_LEVELSET_SUBSTEP_FLUID_SOLVER_H_INCLUDED) #define KRATOS_EDGEBASED_LEVELSET_SUBSTEP_FLUID_SOLVER_H_INCLUDED // #define DEBUG_OUTPUT //#define SPLIT_OSS // #define SYMM_PRESS // System includes #include <string> #include <iostream> #include <algorithm> // #include <omp.h> // External includes // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "includes/node.h" //#include "geometries/geometry.h" #include "utilities/geometry_utilities.h" #include "incompressible_fluid_application.h" #include "custom_utilities/edge_data_c2c.h" namespace Kratos { template<unsigned int TDim, class MatrixContainer, class TSparseSpace, class TLinearSolver> class EdgeBasedLevelSetSubstep { public: //name for the self defined structure typedef EdgesStructureTypeC2C<TDim> CSR_Tuple; typedef vector<CSR_Tuple> EdgesVectorType; //name for row start and column index vectors typedef vector<unsigned int> IndicesVectorType; //defining matrix type for test calculations typedef vector< array_1d<double, TDim> > CalcVectorType; //defining type for local storage of nodal values typedef vector<double> ValuesVectorType; //defining types for matrix operations typedef typename TSparseSpace::MatrixType TSystemMatrixType; typedef typename TSparseSpace::VectorType TSystemVectorType; typedef std::size_t SizeType; //constructor and destructor EdgeBasedLevelSetSubstep (MatrixContainer& mr_matrix_container, ModelPart& mr_model_part, const double viscosity, const double density, const Vector body_force, bool use_mass_correction, double edge_detection_angle, double stabdt_pressure_factor, double stabdt_convection_factor, double tau2_factor, bool assume_constant_dp ) : mr_matrix_container (mr_matrix_container), mr_model_part (mr_model_part), mstabdt_pressure_factor 
(stabdt_pressure_factor), mstabdt_convection_factor (stabdt_convection_factor), medge_detection_angle (edge_detection_angle), mtau2_factor (tau2_factor), massume_constant_dp (assume_constant_dp) { for (ModelPart::NodesContainerType::iterator it=mr_model_part.NodesBegin(); it!=mr_model_part.NodesEnd(); it++) it->FastGetSolutionStepValue (VISCOSITY) = viscosity; mMolecularViscosity = viscosity; // mViscosity = viscosity; noalias (mBodyForce) = body_force; mRho = density; mdelta_t_avg = 1000.0; max_dt = 1.0; muse_mass_correction = use_mass_correction; mshock_coeff = 0.7; mWallLawIsActive = false; mnumsubsteps=5; mmax_dt = 0.0; mcorner_coefficient = 30.0; //50.0; medge_coefficient = 2.0; //30.0; //10.0; // for (unsigned int i = 0; i < TDim; i++) mBodyForce[i] = 0; // mBodyForce[1] = -9.81; // // mRho = 1000.0; std::cout << "Edge based level set substep solver is created" << std::endl; }; ~EdgeBasedLevelSetSubstep() { }; void SetBodyForce( const Vector& body_force) { noalias(mBodyForce) = body_force; KRATOS_WATCH(mBodyForce); } //*********************************** //function to initialize fluid solver void Initialize ( ) { KRATOS_TRY //get number of nodes unsigned int n_nodes = mr_model_part.Nodes().size(); unsigned int n_edges = mr_matrix_container.GetNumberEdges(); //size data vectors mViscosity.resize (n_nodes); mr_matrix_container.SetToZero (mViscosity); mWork.resize (n_nodes); mr_matrix_container.SetToZero (mWork); mvel_n.resize (n_nodes); mr_matrix_container.SetToZero (mvel_n); mvel_n1.resize (n_nodes); mr_matrix_container.SetToZero (mvel_n1); mPn.resize (n_nodes); mr_matrix_container.SetToZero (mPn); mPn1.resize (n_nodes); mr_matrix_container.SetToZero (mPn1); mHmin.resize (n_nodes); mr_matrix_container.SetToZero (mHmin); mHavg.resize (n_nodes); mr_matrix_container.SetToZero (mHavg); mNodalFlag.resize (n_nodes); mr_matrix_container.SetToZero (mNodalFlag); mdistances.resize (n_nodes); mr_matrix_container.SetToZero (mdistances); mTauPressure.resize (n_nodes); 
mr_matrix_container.SetToZero (mTauPressure); mTauConvection.resize (n_nodes); mr_matrix_container.SetToZero (mTauConvection); mTau2.resize (n_nodes); mr_matrix_container.SetToZero (mTau2); mPi.resize (n_nodes); mr_matrix_container.SetToZero (mPi); mXi.resize (n_nodes); mr_matrix_container.SetToZero (mXi); mx.resize (n_nodes); mr_matrix_container.SetToZero (mx); mEdgeDimensions.resize (n_edges); mr_matrix_container.SetToZero (mEdgeDimensions); //convection variables mBeta.resize (n_nodes); mr_matrix_container.SetToZero (mBeta); mPiConvection.resize (n_nodes); mr_matrix_container.SetToZero (mPiConvection); mphi_n.resize (n_nodes); mr_matrix_container.SetToZero (mphi_n); mphi_n1.resize (n_nodes); mr_matrix_container.SetToZero (mphi_n1); mEps.resize (n_nodes); mr_matrix_container.SetToZero (mEps); // mD.resize(n_nodes); // mr_matrix_container.SetToZero(mD); mA.resize (n_nodes); mr_matrix_container.SetToZero (mA); mB.resize (n_nodes); mr_matrix_container.SetToZero (mB); mdiv_error.resize (n_nodes); mr_matrix_container.SetToZero (mdiv_error); mWallReductionFactor.resize (n_nodes); mr_matrix_container.SetToZero (mWallReductionFactor); mdiag_stiffness.resize (n_nodes); mr_matrix_container.SetToZero (mdiag_stiffness); mis_slip.resize (n_nodes); mis_visited.resize (n_nodes); macc.resize (n_nodes); mr_matrix_container.SetToZero (macc); // ValuesVectorType external_pressure; // external_pressure.resize(n_nodes); //read velocity and pressure data from Kratos mr_matrix_container.FillScalarFromDatabase (VISCOSITY, mViscosity, mr_model_part.Nodes() ); mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, mr_model_part.Nodes() ); mr_matrix_container.FillScalarFromDatabase (PRESSURE, mPn1, mr_model_part.Nodes() ); mr_matrix_container.FillOldScalarFromDatabase (PRESSURE, mPn, mr_model_part.Nodes() ); mr_matrix_container.FillOldVectorFromDatabase (VELOCITY, mvel_n, mr_model_part.Nodes() ); mr_matrix_container.FillCoordinatesFromDatabase (mx, mr_model_part.Nodes() ); //set 
flag for first time step mFirstStep = true; //loop to categorize boundary nodes std::vector< unsigned int> tempFixedVelocities; std::vector< array_1d<double,TDim> > tempFixedVelocitiesValues; std::vector< unsigned int> tempPressureOutletList; std::vector< unsigned int> tempDistanceList; for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) { int index = inode->FastGetSolutionStepValue (AUX_INDEX); if (inode->IsFixed (VELOCITY_X) ) //note that the variables can be either all fixed or no one fixed { if (inode->IsFixed (VELOCITY_Y) == false || inode->IsFixed (VELOCITY_Z) == false) { std::cout << "error found on the fixity of node " << inode->Id() << std::endl; KRATOS_THROW_ERROR (std::logic_error, "velocities can be either all fixed or none fixed", "") } tempFixedVelocities.push_back (index); tempFixedVelocitiesValues.push_back (mvel_n1[index]); } if (inode->IsFixed (DISTANCE) ) tempDistanceList.push_back (index); if (inode->IsFixed (PRESSURE) ) { tempPressureOutletList.push_back (index); // mPressureOutlet.push_back(external_pressure[index]); } } mFixedVelocities.resize (tempFixedVelocities.size(),false); mFixedVelocitiesValues.resize (tempFixedVelocitiesValues.size(),false); mPressureOutletList.resize (tempPressureOutletList.size(),false); mDistanceBoundaryList.resize (tempDistanceList.size(),false); mDistanceValuesList.resize (tempDistanceList.size(),false); #pragma omp parallel for for (int i=0; i<static_cast<int> (tempFixedVelocities.size() ); i++) { mFixedVelocities[i] = tempFixedVelocities[i]; mFixedVelocitiesValues[i] = tempFixedVelocitiesValues[i]; } #pragma omp parallel for for (int i=0; i<static_cast<int> (tempPressureOutletList.size() ); i++) { mPressureOutletList[i] = tempPressureOutletList[i]; } for (int i=0; i<static_cast<int> (tempDistanceList.size() ); i++) { mDistanceBoundaryList[i] = tempDistanceList[i]; } //compute slip normals and fill SlipList CalculateNormals 
(mr_model_part.Conditions() ); mr_matrix_container.WriteVectorToDatabase (NORMAL, mSlipNormal, mr_model_part.Nodes() ); if (TDim == 3) DetectEdges3D (mr_model_part.Conditions() ); //print number of nodes corresponding to the different types of boundary conditions // KRATOS_WATCH(mFixedVelocities.size()) // KRATOS_WATCH(mPressureOutletList.size()) // KRATOS_WATCH(mSlipBoundaryList.size()) //determine number of edges and entries unsigned int n_nonzero_entries = 2 * n_edges + n_nodes; //allocate memory for variables mL.resize (n_nodes, n_nodes, n_nonzero_entries); int number_of_threads= OpenMPUtils::GetNumThreads(); std::vector<int> row_partition (number_of_threads); OpenMPUtils::DivideInPartitions (n_nodes,number_of_threads,row_partition); for (int k = 0; k < number_of_threads; k++) { #pragma omp parallel if (OpenMPUtils::ThisThread() == k) { for (int i_node = static_cast<int> (row_partition[k]); i_node < static_cast<int> (row_partition[k + 1]); i_node++) { //loop over all nodes // for (unsigned int i_node = 0; i_node < n_nodes; i_node++) { //flag for considering diagonal matrix elements bool flag = 0; //loop over all neighbours for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++) { //get global index of neighbouring node j unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index]; //define matrix structure row by row (the order does matter!) 
if ( (static_cast<int> (j_neighbour) > i_node) && (flag == 0) ) { //add diagonal/nodal contribution mL.push_back (i_node, i_node, 0.0); flag = 1; } //add non-diagonal/edge contribution mL.push_back (i_node, j_neighbour, 0.0); } //if diagonal element is the last non-zero element of the row if (flag == 0) mL.push_back (i_node, i_node, 0.0); } } } //compute minimum length of the surrounding edges CalculateEdgeLengths (mr_model_part.Nodes() ); //set the pressure projection to the body force value array_1d<double,3> temp = mRho * mBodyForce; for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) inode->FastGetSolutionStepValue (PRESS_PROJ) = temp; mr_matrix_container.FillScalarFromDatabase (POROSITY, mEps, mr_model_part.Nodes() ); //verify that neither h_min nor havg are 0 for (unsigned int i_node=0; i_node<mHmin.size(); i_node++) { if (mHmin[i_node] < 1e-20) KRATOS_THROW_ERROR ( std::logic_error,"hmin too small on node ",i_node+1) if (mHavg[i_node] < 1e-20) KRATOS_THROW_ERROR ( std::logic_error,"havg too small on node ",i_node+1) if (mHmin[i_node] > 1e20) KRATOS_THROW_ERROR ( std::logic_error,"hmin too big on node ",i_node+1) if (mHavg[i_node] > 1e20) KRATOS_THROW_ERROR ( std::logic_error,"havg too big on node ",i_node+1) } for (ModelPart::ElementsContainerType::iterator it=mr_model_part.ElementsBegin(); it!=mr_model_part.ElementsEnd(); it++) { if (it->Id() < 1) { KRATOS_THROW_ERROR (std::logic_error, "Element found with Id 0 or negative","") } double elem_vol = 0.0; if (TDim == 2) elem_vol = it->GetGeometry().Area(); else elem_vol = it->GetGeometry().Volume(); if (elem_vol <= 0) { std::cout << "error on element -> " << it->Id() << std::endl; KRATOS_THROW_ERROR (std::logic_error, "Area can not be lesser than 0","") } } KRATOS_CATCH ("") } void SetShockCapturingCoefficient (double coeff) { mshock_coeff = coeff; } void GatherValues() { KRATOS_TRY mr_matrix_container.FillScalarFromDatabase (VISCOSITY, 
mViscosity, mr_model_part.Nodes() ); mr_matrix_container.FillScalarFromDatabase (POROSITY, mEps, mr_model_part.Nodes() ); mr_matrix_container.FillScalarFromDatabase (PRESSURE, mPn1, mr_model_part.Nodes() ); mr_matrix_container.FillScalarFromDatabase (DISTANCE, mdistances, mr_model_part.Nodes() ); mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, mr_model_part.Nodes() ); mr_matrix_container.FillVectorFromDatabase(PRESS_PROJ, mXi, mr_model_part.Nodes()); mr_matrix_container.FillOldVectorFromDatabase (VELOCITY, mvel_n, mr_model_part.Nodes() ); mr_matrix_container.FillOldScalarFromDatabase (PRESSURE, mPn, mr_model_part.Nodes() ); KRATOS_CATCH("") } //*************************************** //function to set adequate time step size double ComputeTimeStep (const double CFLNumber, const double MaxDt) { KRATOS_TRY //save the maximum time step max_dt = MaxDt; //local variable for time step size //getting value of current velocity and of viscosity // mr_matrix_container.FillScalarFromDatabase (VISCOSITY, mViscosity, mr_model_part.Nodes() ); // mr_matrix_container.FillScalarFromDatabase (POROSITY, mEps, mr_model_part.Nodes() ); // mr_matrix_container.FillScalarFromDatabase (PRESSURE, mPn1, mr_model_part.Nodes() ); // mr_matrix_container.FillScalarFromDatabase (DISTANCE, mdistances, mr_model_part.Nodes() ); // mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, mr_model_part.Nodes() ); // mr_matrix_container.FillVectorFromDatabase(PRESS_PROJ, mXi, mr_model_part.Nodes()); // // mr_matrix_container.FillOldVectorFromDatabase (VELOCITY, mvel_n, mr_model_part.Nodes() ); // mr_matrix_container.FillOldScalarFromDatabase (PRESSURE, mPn, mr_model_part.Nodes() ); // mr_matrix_container.FillScalarFromDatabase(DIAMETER, mD, mr_model_part.Nodes()); // mr_matrix_container.FillScalarFromDatabase (LIN_DARCY_COEF, mA, mr_model_part.Nodes() ); // mr_matrix_container.FillScalarFromDatabase (NONLIN_DARCY_COEF, mB, mr_model_part.Nodes() ); // double delta_t_i = delta_t; 
//******************* //loop over all nodes int n_nodes = static_cast<int>(mvel_n1.size()); unsigned int n_proc = OpenMPUtils::GetNumThreads(); Vector dt_avg_vec(n_proc,1e10); Vector dt_vec(n_proc,1e10); Vector dt_avg_novisc_vec(n_proc,1e10); #pragma omp parallel for firstprivate(n_nodes) for (int i_node = 0; i_node < n_nodes; i_node++) { unsigned int my_id = OpenMPUtils::ThisThread(); double& delta_t = dt_vec[my_id]; double& mdelta_t_avg = dt_avg_vec[my_id]; double& delta_t_avg_novisc = dt_avg_novisc_vec[my_id]; const array_1d<double, TDim>& v_i = mvel_n1[i_node]; const double havg_i = mHavg[i_node]; const double hmin_i = mHmin[i_node]; const double eps_i = mEps[i_node]; //const double d_i = mD[i_node]; double nu = mViscosity[i_node]; // const double lindarcy_i = mA[i_node]; // const double nonlindarcy_i = mB[i_node]; double vel_norm = norm_2 (v_i); //double porosity_coefficient = ComputePorosityCoefficient(nu, vel_norm, eps_i, d_i); // double porosity_coefficient = ComputePorosityCoefficient( vel_norm, eps_i, lindarcy_i, nonlindarcy_i); vel_norm /= eps_i; //use CFL condition to compute time step size double delta_t_i = 1.0 / (vel_norm /hmin_i + nu / (hmin_i * hmin_i) /*+ porosity_coefficient*/); double delta_t_i_avg = 1.0 / (vel_norm /havg_i + nu / (havg_i * havg_i) /*+ porosity_coefficient*/); double delta_t_i_avg_novisc = 1.0 / (2.0 * vel_norm /havg_i ); //considering the most restrictive case of neighbor's velocities with similar direction but opposite sense. 
//loop over all neighbours for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++) { //get global index of neighbouring node j unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index]; const array_1d<double, TDim>& v_j = mvel_n1[j_neighbour]; double v_diff_norm = 0.0; for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) { double temp = v_i[l_comp] - v_j[l_comp]; v_diff_norm += temp*temp; } v_diff_norm = sqrt (v_diff_norm); v_diff_norm /= eps_i; double delta_t_j = 1.0 / (v_diff_norm /havg_i + 4.0 * nu / (havg_i * havg_i) ); // double delta_t_j = 1.0 / (2.0 * v_diff_norm /hmin_i + 4.0 * nu / (hmin_i * hmin_i) ); double delta_t_j_avg_novisc = 1.0 / (2.0 * v_diff_norm /havg_i ); if (delta_t_j < delta_t_i) delta_t_i = delta_t_j; if (delta_t_j_avg_novisc < delta_t_i_avg_novisc) delta_t_i_avg_novisc = delta_t_j_avg_novisc; // if ((v_i_par >= 0.0 && v_j_par <= 0.0) || (v_i_par <= 0.0 && v_j_par >= 0.0)) // { // double delta_t_j = CFLNumber * 1.0 / (2.0 * norm_2(v_diff) /hmin_i + 4.0 * mViscosity / (hmin_i * hmin_i)); //// double delta_t_j = CFLNumber / ((fabs(v_i_par) + fabs(v_j_par)) / mHmin[i_node] + 2.0 * mViscosity / (mHmin[i_node] * mHmin[i_node])); // // KRATOS_WATCH(delta_t_j); // // KRATOS_WATCH(delta_t_i); // if (delta_t_j < delta_t_i) // delta_t_i = delta_t_j; // } } //choose the overall minimum of delta_t_i if (delta_t_i < delta_t) delta_t = delta_t_i; if (delta_t_i_avg < mdelta_t_avg) mdelta_t_avg = delta_t_i_avg; if (delta_t_i_avg_novisc < delta_t_avg_novisc) delta_t_avg_novisc = delta_t_i_avg_novisc; } //finalizing parallel computations double delta_t = dt_vec[0]; mdelta_t_avg = dt_avg_vec[0]; double delta_t_avg_novisc = dt_avg_novisc_vec[0]; for(unsigned int i=1; i<dt_vec.size(); i++) { if(delta_t > dt_vec[i]) delta_t = dt_vec[i]; if(mdelta_t_avg > dt_vec[i]) mdelta_t_avg = dt_avg_vec[i]; if(delta_t_avg_novisc > dt_vec[i]) 
delta_t_avg_novisc = dt_avg_novisc_vec[i]; } //take into account wall law in the estimation // int slip_size = mSlipBoundaryList.size(); // for (int i_slip = 0; i_slip < slip_size; i_slip++) // { // unsigned int i_node = mSlipBoundaryList[i_slip]; // double nu = mViscosity[i_node]; // // double delta_t_i = 0.25*mY_wall*mY_wall/nu; // // // Reducing wall friction for the large element near wall. Pooyan. // double reducing_factor = 1.00; // double h_min = mHavg[i_node]; // if(mY_wall < h_min) // reducing_factor = mY_wall / h_min; // delta_t_i /= reducing_factor; // // if (delta_t_i < delta_t) // delta_t = delta_t_i; // } // mdelta_t_avg = delta_t; //this should not be done ... remove it or decide what to do... delta_t_avg_novisc *= CFLNumber; // mnumsubsteps = ceil (delta_t_avg_novisc/delta_t); // mnumsubsteps += 1; //this is for security // delta_t *= CFLNumber; if (mnumsubsteps <= 1) { mnumsubsteps=1; delta_t_avg_novisc = delta_t; } //std::cout << "mdelta_t_avg =" << mdelta_t_avg <<std::endl; //std::cout << "delta_t =" << delta_t <<std::endl; //std::cout << "mnumsubsteps =" << mnumsubsteps <<std::endl; delta_t = delta_t_avg_novisc; // delta_t *= CFLNumber; //******************* //perform MPI syncronization of the dt (minimum should be kept) return delta_t; KRATOS_CATCH ("") } void ApplySmagorinsky (double MolecularViscosity, double Cs) { if (Cs != 0) { if (TDim == 3) ApplySmagorinsky3D (MolecularViscosity, Cs); else KRATOS_THROW_ERROR (std::logic_error,"smagorinsky not yet implemented in 2D",""); } } void UpdateFixedVelocityValues() { KRATOS_TRY //read velocity and pressure data from Kratos // ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes(); // mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, rNodes); int fixed_size = mFixedVelocities.size(); #pragma omp parallel for firstprivate(fixed_size) for (int i_velocity = 0; i_velocity < fixed_size; i_velocity++) { unsigned int i_node = mFixedVelocities[i_velocity]; array_1d<double, TDim>& 
u_i_fix = mFixedVelocitiesValues[i_velocity]; const array_1d<double, TDim>& u_i = mvel_n1[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) u_i_fix[comp] = u_i[comp]; } KRATOS_CATCH (""); } //********************************************************************************** //function to solve fluid equations - fractional step 1: compute fractional momentum void SolveStep1() { KRATOS_TRY //PREREQUISITES //variables for node based data handling ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes(); int n_nodes = rNodes.size(); //storage of nodal values in local variables CalcVectorType rhs; rhs.resize (n_nodes); //read velocity and pressure data from Kratos // mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, rNodes); // mr_matrix_container.FillOldVectorFromDatabase (VELOCITY, mvel_n, rNodes); // mr_matrix_container.FillScalarFromDatabase (VISCOSITY, mViscosity, rNodes); // mr_matrix_container.FillScalarFromDatabase (PRESSURE, mPn1, rNodes); // mr_matrix_container.FillOldScalarFromDatabase (PRESSURE, mPn, rNodes); mr_matrix_container.FillScalarFromDatabase (DISTANCE, mdistances, mr_model_part.Nodes() ); // mr_matrix_container.FillScalarFromDatabase(DIAMETER, mD, mr_model_part.Nodes()); mr_matrix_container.FillScalarFromDatabase (POROSITY, mEps, mr_model_part.Nodes() ); mr_matrix_container.FillScalarFromDatabase (LIN_DARCY_COEF, mA, mr_model_part.Nodes() ); mr_matrix_container.FillScalarFromDatabase (NONLIN_DARCY_COEF, mB, mr_model_part.Nodes() ); //read time step size from Kratos ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); double delta_t = CurrentProcessInfo[DELTA_TIME]; //compute intrinsic time double time_inv_avg = 1.0/mdelta_t_avg; // if(mmax_dt < mdelta_t_avg) mmax_dt = mdelta_t_avg; // double time_inv_avg = 1.0/mmax_dt; double stabdt_pressure_factor = mstabdt_pressure_factor; double stabdt_convection_factor = mstabdt_convection_factor; //double tau2_factor = mtau2_factor; #pragma omp parallel for 
firstprivate(time_inv_avg,stabdt_pressure_factor,stabdt_convection_factor) for (int i_node = 0; i_node < n_nodes; i_node++) { // double& h_i = mHavg[i_node]; double& h_avg_i = mHavg[i_node]; double& h_min_i = mHmin[i_node]; array_1d<double, TDim>& a_i = mvel_n1[i_node]; const double nu_i = mViscosity[i_node]; const double eps_i = mEps[i_node]; //const double d_i = mD[i_node]; const double lindarcy_i = mA[i_node]; const double nonlindarcy_i = mB[i_node]; double vel_norm = norm_2 (a_i); //double porosity_coefficient = ComputePorosityCoefficient(nu_i, vel_norm, eps_i, d_i); double porosity_coefficient = ComputePorosityCoefficient (vel_norm, eps_i, lindarcy_i, nonlindarcy_i); vel_norm /= eps_i; double tau = 1.0 / (2.0 * vel_norm / h_min_i + stabdt_pressure_factor*time_inv_avg + (4.0*nu_i) / (h_avg_i * h_avg_i) + porosity_coefficient); double tau_conv = 1.0 / (2.0 * vel_norm / h_min_i + stabdt_convection_factor*time_inv_avg ); mTauPressure[i_node] = tau; mTauConvection[i_node] = tau_conv; // mTau2[i_node] = (nu_i + h_avg_i*vel_norm*0.5) *tau2_factor; } // //smoothen the tau press - mTau2 used as temp var // #pragma omp parallel for // for (int i_node = 0; i_node < n_nodes; i_node++) // { // double& tau = mTau2[i_node]; //****************** // tau = mTauPressure[i_node]; // double counter = 1.0; // //const double& p_i = pressure[i_node]; // for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++) // { // unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index]; // tau += mTauPressure[j_neighbour]; // counter+=1.0; // } // tau/=counter; // } // // mTauPressure = mTau2; //calculating the convective projection #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; i_node++) { array_1d<double, TDim>& pi_i = mPi[i_node]; //****************** //setting to zero for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) pi_i[l_comp] = 0.0; array_1d<double, 
TDim> a_i = mvel_n1[i_node]; const array_1d<double, TDim>& U_i = mvel_n1[i_node]; const double& eps_i = mEps[i_node]; a_i /= eps_i; //const double& p_i = pressure[i_node]; for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++) { unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index]; array_1d<double, TDim> a_j = mvel_n1[j_neighbour]; const array_1d<double, TDim>& U_j = mvel_n1[j_neighbour]; const double& eps_j = mEps[j_neighbour]; a_j /= eps_j; CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index]; edge_ij.Add_ConvectiveContribution (pi_i, a_i, U_i, a_j, U_j); // edge_ij.Add_grad_p(pi_i, p_i, p_j); } // const double m_inv = mr_matrix_container.GetInvertedMass() [i_node]; // for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) // pi_i[l_comp] *= m_inv; } int inout_size = mInOutBoundaryList.size(); //#pragma omp parallel for firstprivate(slip_size) for (int i = 0; i < inout_size; i++) { unsigned int i_node = mInOutBoundaryList[i]; // double dist = mdistances[i_node]; // if (dist <= 0.0) // { const array_1d<double, TDim>& U_i = mvel_n1[i_node]; const array_1d<double, TDim>& an_i = mInOutNormal[i_node]; double projection_length = 0.0; //double Ain = 0.0; for (unsigned int comp = 0; comp < TDim; comp++) { projection_length += U_i[comp] * an_i[comp]; } array_1d<double, TDim>& pi_i = mPi[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) pi_i[comp] += projection_length * U_i[comp] ; // } } #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; i_node++) { array_1d<double, TDim>& pi_i = mPi[i_node]; const double m_inv = mr_matrix_container.GetInvertedMass() [i_node]; for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) pi_i[l_comp] *= m_inv; } // //completing with boundary integrals // //loop over all faces // for (ModelPart::ConditionsContainerType::iterator cond_it = mr_model_part.ConditionsBegin(); cond_it != 
mr_model_part.ConditionsEnd(); cond_it++) // { // //get geometry data of the face // Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry(); // // //reference for area normal of the face // array_1d<double, 3 > & face_normal = cond_it->GetValue(NORMAL); // double A = norm_2(face_normal); // // unsigned int i_node0 = static_cast<unsigned int> (face_geometry[0].FastGetSolutionStepValue(AUX_INDEX)); // unsigned int i_node1 = static_cast<unsigned int> (face_geometry[1].FastGetSolutionStepValue(AUX_INDEX)); // unsigned int i_node2 = static_cast<unsigned int> (face_geometry[2].FastGetSolutionStepValue(AUX_INDEX)); // // if(face_geometry[0].IsFixed(VELOCITY_X) && face_geometry[1].IsFixed(VELOCITY_X) && face_geometry[2].IsFixed(VELOCITY_X)) // { // // //KRATOS_WATCH(cond_it->Id()); // // if (static_cast<bool>(cond_it->GetValue(IS_STRUCTURE)) == false) // //{ // const array_1d<double,TDim>& v_0 = mvel_n1[i_node0]; // const array_1d<double,TDim>& v_1 = mvel_n1[i_node1]; // const array_1d<double,TDim>& v_2 = mvel_n1[i_node2]; // double An0 = inner_prod(v_0,face_normal) / (A*mEps[i_node0]); // double An1 = inner_prod(v_1,face_normal) / (A*mEps[i_node1]); // double An2 = inner_prod(v_2,face_normal) / (A*mEps[i_node2]); // //KRATOS_WATCH(face_normal); // mPi[i_node0] -= ((2.0*An0+An1+An2)*0.5*0.333333333333333333333333333333*0.5)*face_normal; // mPi[i_node1] -= ((An0+2.0*An1+An2)*0.5*0.333333333333333333333333333333*0.5)*face_normal; // mPi[i_node2] -= ((An0+An1+2.0*An2)*0.5*0.333333333333333333333333333333*0.5)*face_normal; // } // //} // } // // // //calculating the convective projection // #pragma omp parallel for // for (int i_node = 0; i_node < n_nodes; i_node++) // { // array_1d<double, TDim>& pi_i = mPi[i_node]; //****************** // const double m_inv = mr_matrix_container.GetInvertedMass() [i_node]; // for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) // pi_i[l_comp] *= m_inv; // } // // // KRATOS_WATCH("step1 before rk loop") // KRATOS_WATCH(mnumsubsteps) 
// KRATOS_WATCH(mPn) // KRATOS_WATCH(mPn1) // KRATOS_WATCH(mPi) // KRATOS_WATCH(mvel_n1) // KRATOS_WATCH(mvel_n) #ifdef DEBUG_OUTPUT KRATOS_WATCH("before RK of step1 - new") double aux_v=0.0; for (int i_node = 0; i_node < mvel_n1.size(); i_node++) aux_v += inner_prod(mvel_n1[i_node],mvel_n1[i_node]); double aux_oldv=0.0; for (int i_node = 0; i_node < mvel_n1.size(); i_node++) aux_oldv += inner_prod(mvel_n[i_node],mvel_n[i_node]); double aux_pi=0.0; for (int i_node = 0; i_node < mvel_n1.size(); i_node++) aux_pi += inner_prod(mPi[i_node],mPi[i_node]); KRATOS_WATCH(inner_prod(mPn,mPn)); KRATOS_WATCH(aux_v); KRATOS_WATCH(aux_oldv); KRATOS_WATCH(aux_pi); KRATOS_WATCH(inner_prod(mdistances,mdistances)); KRATOS_WATCH(inner_prod(mViscosity,mViscosity)); #endif CalcVectorType auxn = mvel_n; double n_substeps = mnumsubsteps+1; double reduced_it = 0; double energy_initial = 0.0; double energy_final = 1.0; //compute initial kinetic energy #pragma omp parallel for firstprivate(n_nodes) reduction(+:energy_initial) for (int i_node = 0; i_node < n_nodes; i_node++) if (mdistances[i_node] <= 0.0) energy_initial += mr_matrix_container.GetLumpedMass()[i_node] * inner_prod(mvel_n[i_node],mvel_n[i_node]); //KRATOS_WATCH(energy_initial) // KRATOS_WATCH(n_substeps) while(reduced_it++ < 2 ) { double delta_t_substep = delta_t/n_substeps; for (unsigned int substep = 0; substep<n_substeps; substep++) { //std::cout << "substep " << substep+1 << " of " << n_substeps << std::endl; mr_matrix_container.AssignVectorToVector (mvel_n, mWork); //mWork = mvel_n //first step of Runge Kutta mr_matrix_container.AssignVectorToVector (mvel_n, mvel_n1); //mvel_n1 = mvel_n mr_matrix_container.SetToZero (rhs); CalculateRHS (mvel_n1, mPn, mvel_n1, rhs,mdiag_stiffness); Add_Effective_Inverse_Multiply (mWork, mWork, delta_t_substep / 6.0, mr_matrix_container.GetLumpedMass(),mdiag_stiffness,rhs); Add_Effective_Inverse_Multiply (mvel_n1, mvel_n, 0.5 * delta_t_substep, 
mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs); ApplyVelocityBC (mvel_n1); //second step mr_matrix_container.SetToZero (rhs); CalculateRHS (mvel_n1, mPn, mvel_n1, rhs,mdiag_stiffness); Add_Effective_Inverse_Multiply (mWork, mWork, delta_t_substep / 3.0, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs); Add_Effective_Inverse_Multiply (mvel_n1, mvel_n, 0.5 * delta_t_substep, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs); ApplyVelocityBC (mvel_n1); //third step mr_matrix_container.SetToZero (rhs); CalculateRHS (mvel_n1, mPn, mvel_n1, rhs,mdiag_stiffness); Add_Effective_Inverse_Multiply (mWork, mWork, delta_t_substep / 3.0, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs); Add_Effective_Inverse_Multiply (mvel_n1, mvel_n, delta_t_substep, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs); ApplyVelocityBC (mvel_n1); //fourth step mr_matrix_container.SetToZero (rhs); CalculateRHS (mvel_n1, mPn, mvel_n1, rhs,mdiag_stiffness); Add_Effective_Inverse_Multiply (mWork, mWork, delta_t_substep / 6.0, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs); //compute right-hand side mr_matrix_container.AssignVectorToVector (mWork, mvel_n1); ApplyVelocityBC (mvel_n1); //prepare for next step mr_matrix_container.AssignVectorToVector (mvel_n1, mvel_n); } energy_final = 0.0; //compute initial kinetic energy #pragma omp parallel for firstprivate(n_nodes) reduction(+:energy_final) for (int i_node = 0; i_node < n_nodes; i_node++) if (mdistances[i_node] <= 0.0) energy_final += mr_matrix_container.GetLumpedMass()[i_node] * inner_prod(mvel_n1[i_node],mvel_n1[i_node]); //put back the original velocity at step n mr_matrix_container.AssignVectorToVector (auxn, mvel_n); if(energy_final < 1.5*energy_initial) break; else n_substeps*=10; if(reduced_it > 1) { KRATOS_WATCH(energy_initial) KRATOS_WATCH(energy_final) KRATOS_WATCH(n_substeps) } } // mr_matrix_container.WriteVectorToDatabase (VELOCITY, mvel_n1, mr_model_part.Nodes() ); // 
KRATOS_WATCH("end of step1")
//        KRATOS_WATCH(mvel_n1)
//        KRATOS_WATCH(mvel_n)

#ifdef DEBUG_OUTPUT
        KRATOS_WATCH("end of step1 - new")
        aux_v=0.0;
        for (int i_node = 0; i_node < mvel_n1.size(); i_node++)
            aux_v += inner_prod(mvel_n1[i_node],mvel_n1[i_node]);
        double aux_xi=0.0;
        for (int i_node = 0; i_node < mvel_n1.size(); i_node++)
            aux_xi += inner_prod(mXi[i_node],mXi[i_node]);
        KRATOS_WATCH(inner_prod(mPn,mPn));
        KRATOS_WATCH(inner_prod(mdistances,mdistances));
        KRATOS_WATCH(inner_prod(mViscosity,mViscosity));
        KRATOS_WATCH(aux_v);
        KRATOS_WATCH(aux_xi);
#endif

        KRATOS_CATCH ("")
    }

    //*********************************************************************
    //function to calculate right-hand side of fractional momentum equation

    /// Assembles the right-hand side of the fractional-step momentum equation with an
    /// edge-based (CSR) loop: body force weighted by porosity, Darcy/porosity resistance
    /// (returned separately as a diagonal stiffness), convection, pressure gradient,
    /// viscous term and convection stabilization, plus an in/out boundary contribution.
    ///
    /// @param vel                  nodal velocities used in the viscous/boundary terms
    /// @param pressure             nodal pressures entering through the gradient term
    /// @param convective_velocity  velocity used to build the convective operator
    /// @param rhs                  output: assembled momentum RHS per node
    /// @param diag_stiffness       output: lumped-mass * porosity-resistance coefficient per node
    void CalculateRHS (
        const CalcVectorType& vel,
        const ValuesVectorType& pressure,
        const CalcVectorType& convective_velocity,
        CalcVectorType& rhs,
        ValuesVectorType& diag_stiffness)
    {
        KRATOS_TRY

        int n_nodes = vel.size();

        //perform MPI syncronization

        //calculating the RHS
        array_1d<double, TDim> stab_low;
        array_1d<double, TDim> stab_high;
        double inverse_rho = 1.0 / mRho;
        #pragma omp parallel for private(stab_low,stab_high)
        for (int i_node = 0; i_node < n_nodes; i_node++)
        {
            double dist = mdistances[i_node];
            if (dist <= 0.0) //node is inside domain ---- if outside do nothing
            {
                const double nu_i = mViscosity[i_node];
                const double nu_j = nu_i;
                array_1d<double, TDim>& rhs_i = rhs[i_node];
                const array_1d<double, TDim>& f_i = mBodyForce;
                array_1d<double, TDim> a_i = convective_velocity[i_node];
                const array_1d<double, TDim>& U_i = vel[i_node];
                const array_1d<double, TDim>& pi_i = mPi[i_node];
                const double& p_i = pressure[i_node];
                const double& eps_i = mEps[i_node];
                const double lindarcy_i = mA[i_node];
                const double nonlindarcy_i = mB[i_node];
                double edge_tau = mTauConvection[i_node];

                //divide the convective velocity by the porosity of node i
                a_i /= eps_i;

                //initializing with the external forces (e.g. gravity)
                double& m_i = mr_matrix_container.GetLumpedMass() [i_node];
                for (unsigned int comp = 0; comp < TDim; comp++)
                    rhs_i[comp] = m_i * eps_i * f_i[comp] ;

                //applying the effect of the porosity
                double porosity_coefficient = ComputePorosityCoefficient ( norm_2 (U_i), eps_i, lindarcy_i, nonlindarcy_i);
                diag_stiffness[i_node]= m_i * porosity_coefficient;

                //std::cout << i_node << "rhs =" << rhs_i << "after adding body force" << std::endl;

                //convective term
                for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
                {
                    unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
                    array_1d<double, TDim> a_j = convective_velocity[j_neighbour];
                    const array_1d<double, TDim>& U_j = vel[j_neighbour];
                    const array_1d<double, TDim>& pi_j = mPi[j_neighbour];
                    const double& p_j = pressure[j_neighbour];
                    const double& eps_j = mEps[j_neighbour];
                    // const double& beta_j = mBeta[j_neighbour];
                    a_j /= eps_j;

                    CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];

                    edge_ij.Sub_ConvectiveContribution (rhs_i, a_i, U_i, a_j, U_j);
                    //std::cout << i_node << "rhs =" << rhs_i << "after convective contrib" << std::endl;

                    //take care! we miss including a B.C. for the external pressure
                    //edge_ij.Add_Gp (rhs_i,p_i*inverse_rho,p_j*inverse_rho);
                    // NOTE(review): eps_i multiplies BOTH pressure terms here; if the neighbour's
                    // porosity was intended for the j side this should read eps_j - confirm before changing.
                    edge_ij.Sub_grad_p(rhs_i, p_i*inverse_rho*eps_i, p_j * inverse_rho*eps_i);
                    // edge_ij.Add_grad_p(rhs_i, p_i*inverse_rho, p_j * inverse_rho);
                    //std::cout << i_node << "rhs =" << rhs_i << "after Gp" << std::endl;

                    edge_ij.Sub_ViscousContribution (rhs_i, U_i, nu_i, U_j, nu_j);
                    // edge_ij.Add_ViscousContribution(rhs_i, U_i, nu_i, U_j, nu_j);
                    //std::cout << i_node << "rhs =" << rhs_i << "after viscous" << std::endl;

                    //add stabilization
                    edge_ij.CalculateConvectionStabilization_LOW (stab_low, a_i, U_i, a_j, U_j);
                    // edge_ij.CalculateConvectionStabilization_LOW(stab_low, a_i, U_i,p_i, a_j, U_j,p_j);
                    edge_ij.CalculateConvectionStabilization_HIGH (stab_high, a_i, pi_i, a_j, pi_j);
                    // double beta = 1.0;
                    // double beta = beta_i;
                    // if(beta_j > beta)
                    // beta = beta_j;
                    // beta = 1.0;
                    // edge_ij.Sub_StabContribution(rhs_i, edge_tau*beta, 1.0, stab_low, stab_high);
                    // edge_ij.Sub_StabContribution(rhs_i, edge_tau, (1.0-beta), stab_low, stab_high);
                    edge_ij.Sub_StabContribution (rhs_i, edge_tau, 1.0, stab_low, stab_high);
                }
                // std::cout << i_node << "rhs =" << rhs_i << std::endl;
            }
        }

        //contribution of the in/out boundary nodes (serial: the parallel pragma is disabled)
        int inout_size = mInOutBoundaryList.size();
        //#pragma omp parallel for firstprivate(slip_size)
        for (int i = 0; i < inout_size; i++)
        {
            unsigned int i_node = mInOutBoundaryList[i];
            // double dist = mdistances[i_node];
            // if (dist <= 0.0)
            // {
            const array_1d<double, TDim>& U_i = mvel_n1[i_node];
            const array_1d<double, TDim>& an_i = mInOutNormal[i_node];
            double projection_length = 0.0;
            double Ain = 0.0;
            for (unsigned int comp = 0; comp < TDim; comp++)
            {
                projection_length += U_i[comp] * an_i[comp];
                Ain += an_i[comp]*an_i[comp];
            }
            // NOTE(review): Ain is computed but never used below - presumably a leftover.
            array_1d<double, TDim>& rhs_i = rhs[i_node];
            for (unsigned int comp = 0; comp < TDim; comp++)
                rhs_i[comp] += projection_length * U_i[comp] ;
            // }
        }

        /*	    for (int i = 0; i < mSlipBoundaryList.size(); i++)
                {
                    int i_node = mSlipBoundaryList[i];
                    double dist = mdistances[i_node];
                    if (dist <= 0.0) //node is inside domain ---- if outside do nothing
                    {
                        const double& p_i = pressure[i_node];
                        const array_1d<double,3>& Ani = mSlipNormal[i_node];
                        array_1d<double, TDim>& rhs_i = rhs[i_node];

                        array_1d<double, TDim> temp;
                        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                            temp[l_comp] = 0.0;

                        for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
                        {
                            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                            if(mdistances[j_neighbour] <= 0.0 && mis_slip[j_neighbour] == true)
                            {
                                //const double& p_j = pressure[j_neighbour];
                                array_1d<double,3> Anj = mSlipNormal[j_neighbour];
                                Anj /= norm_2(Anj);
                                for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                                    temp[l_comp] += p_i*Anj[l_comp];
                            }
                        }

                        //take out part in the direction of Ani
                        double Ai = norm_2(Ani);
                        double aux = 0.0;
                        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                            aux += temp[l_comp]*Ani[l_comp];
                        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                            temp[l_comp] -= aux *Ani[l_comp] / (Ai*Ai);

                        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                            rhs_i[l_comp] -= 0.25*Ai*temp[l_comp];
                    }
                }*/
        //            KRATOS_WATCH("finished**************************************************") */

        /*
                //correction to the pressure graient
                //loop over all faces
                CalcVectorType press_correction(vel.size());
                mr_matrix_container.SetToZero(press_correction);
                // mr_matrix_container.SetToZero(slip_area);
                for (ModelPart::ConditionsContainerType::iterator cond_it = mr_model_part.ConditionsBegin(); cond_it != mr_model_part.ConditionsEnd(); cond_it++)
                {
                    //get geometry data of the face
                    Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry();

                    //reference for area normal of the face
                    array_1d<double, 3 > & face_normal = cond_it->GetValue(NORMAL);
                    double A = norm_2(face_normal);

                    unsigned int i_node0 = static_cast<unsigned int> (face_geometry[0].FastGetSolutionStepValue(AUX_INDEX));
                    unsigned int i_node1 = static_cast<unsigned int> (face_geometry[1].FastGetSolutionStepValue(AUX_INDEX));
                    unsigned int i_node2 = static_cast<unsigned int> (face_geometry[2].FastGetSolutionStepValue(AUX_INDEX));

                    if (static_cast<bool>(cond_it->GetValue(IS_STRUCTURE)) == true)
                    {
                        const double& p_0 = pressure[i_node0];
                        const double& p_1 = pressure[i_node1];
                        const double& p_2 = pressure[i_node2];

                        //TODO: we should only keep the part orthogonal to the external normal on each node!!!!
                        press_correction[i_node0] -= ((2.0*p_0+p_1+p_2)*0.5*0.333333333333333333333333333333*0.5*inverse_rho)*face_normal;
                        press_correction[i_node1] -= ((p_0+2.0*p_1+p_2)*0.5*0.333333333333333333333333333333*0.5*inverse_rho)*face_normal;
                        press_correction[i_node2] -= ((p_0+p_1+2.0*p_2)*0.5*0.333333333333333333333333333333*0.5*inverse_rho)*face_normal;
                    }
                    else
                    {
                        const array_1d<double,TDim>& v_0 = vel[i_node0];
                        const array_1d<double,TDim>& v_1 = vel[i_node1];
                        const array_1d<double,TDim>& v_2 = vel[i_node2];

                        double An0 = inner_prod(v_0,face_normal) / (A*A);
                        double An1 = inner_prod(v_1,face_normal) / (A*A);
                        double An2 = inner_prod(v_2,face_normal) / (A*A);

                        rhs[i_node0] -= ((2.0*An0+An1+An2)*0.5*0.333333333333333333333333333333*0.5)*face_normal;
                        rhs[i_node1] -= ((An0+2.0*An1+An2)*0.5*0.333333333333333333333333333333*0.5)*face_normal;
                        rhs[i_node2] -= ((An0+An1+2.0*An2)*0.5*0.333333333333333333333333333333*0.5)*face_normal;
                    }
                }

                //slip condition
                int slip_size = mSlipBoundaryList.size();
                #pragma omp parallel for firstprivate(slip_size)
                for (int i_slip = 0; i_slip < slip_size; i_slip++)
                {
                    unsigned int i_node = mSlipBoundaryList[i_slip];
                    double dist = mdistances[i_node];
                    if (dist <= 0.0 && mis_slip[i_node] == true)
                    {
                        array_1d<double, TDim>& rhs_i = rhs[i_node];
                        // array_1d<double, TDim>& an_i = mSlipNormal[i_node];
                        // double normalization = 0.0;
                        // for (unsigned int comp = 0; comp < TDim; comp++)
                        // {
                        // normalization += an_i[comp] * an_i[comp];
                        // }
                        // normalization = sqrt(normalization);
                        array_1d<double,TDim>& press_corr_i = press_correction[i_node];
                        for (unsigned int comp = 0; comp < TDim; comp++)
                            rhs_i[comp] += press_corr_i[comp];
                        //we should remove here the normal component!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    }
                }
         */

        //apply wall resistance
        if (mWallLawIsActive == true)
            ComputeWallResistance (vel,diag_stiffness);

        // ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
        // mr_matrix_container.WriteVectorToDatabase (VELOCITY, mvel_n1, rNodes);
        KRATOS_CATCH ("")
    }

    //*************************************************************************
    //function to solve fluid equations - fractional step 2: calculate pressure

    /// Fractional step 2: estimates the pressure on the first layer of nodes outside
    /// the fluid, assembles the (stabilized) pressure Laplacian system, solves it and
    /// recomputes the pressure-gradient projection. Returns 0 on success, -1 when the
    /// distance gradient was unusable on some extrapolation node (pressure there is
    /// then averaged from its layer-0 neighbours instead).
    int SolveStep2 (typename TLinearSolver::Pointer pLinearSolver)
    {
        KRATOS_TRY

        // typedef Node < 3 > PointType;
        // typedef PointerVector<PointType > PointVector;
        // typedef PointVector::iterator PointIterator;

        //reset the "visited" marker on all nodes
        #pragma omp parallel for
        for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++)
            mis_visited[i_node] = 0;

        int layer_counter = -1;
        boost::numeric::ublas::vector<int> layers(mr_model_part.Nodes().size());
        boost::numeric::ublas::vector<int> layer_limits(3);

        //Re-generate a container with LAYER 0 and LAYER 1 after convection of the free surface
        layer_limits[0] = 0;

        #pragma omp parallel for
        for (int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++)
        {
            if(mdistances[i_node] < 0.0)
            {
                for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
                {
                    //get global index of neighbouring node j
                    unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                    if(mdistances[j_neighbour] >= 0.0 && mis_visited[i_node] == 0 )
                    {
                        //the critical section guards only the shared counter/array update below
                        #pragma omp critical
                        layers[++layer_counter] = i_node;

                        mis_visited[i_node] = 1;
                        break;
                    }
                }
            }
            else
                mPn1[i_node] = 0.0;
        }

        // NOTE(review): layers[] was filled via prefix increment, so layer_counter now holds the
        // LAST written index rather than the number of entries; the loop below therefore stops one
        // short of the last layer-0 node and the postfix writes that follow start by overwriting
        // it - confirm whether this off-by-one is intended.
        layer_limits[1] = layer_counter;

        for(unsigned int i=0; i<static_cast<unsigned int>(layer_limits[1]); i++)
        {
            unsigned int i_node = layers[i];
            for (unsigned int csr_index =
mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
            {
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                if( mdistances[j_neighbour] >= 0.0 && mis_visited[j_neighbour] == 0)
                {
                    layers[layer_counter++] = j_neighbour;
                    mis_visited[j_neighbour] = 2;
                }
            }
        }
        layer_limits[2] = layer_counter;

        int return_value = 0;

        //on the first layer outside the pressure is set to a value such that on the free surface the pressure is approx 0
        #pragma omp parallel for
        for( int iii=static_cast<int>(layer_limits[1]); iii<static_cast<int>(layer_limits[2]); iii++)
        {
            unsigned int i_node = layers[iii];

            //compute the nodal gradient of the distance function (edge loop + lumped-mass inverse)
            array_1d<double, TDim> grad_d;
            for (unsigned int comp = 0; comp < TDim; comp++)
                grad_d[comp] = 0.0;

            double dist_i = mdistances[i_node];

            for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
            {
                //get global index of neighbouring node j
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];

                const double& dist_j = mdistances[j_neighbour];

                //projection of pressure gradients
                CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];

                edge_ij.Add_grad_p (grad_d, dist_i, dist_j);
            }

            const double& m_inv = mr_matrix_container.GetInvertedMass() [i_node];
            for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                grad_d[l_comp] *= m_inv;

            double norm_grad = norm_2 (grad_d);

            if (norm_grad < 2.0)
            {
                //clamp the distance to [0, 2*havg] before extrapolating the pressure
                if(dist_i < 0.01*mHavg[i_node] )
                    dist_i = 0.0;
                else if(dist_i > 2.0*mHavg[i_node] )
                {
                    KRATOS_WATCH("distance is much larger than expected!!")
                    dist_i = 2.0*mHavg[i_node];
                }

                if(norm_grad > 0.001)
                {
                    grad_d /= norm_grad; //this is the direction of the gradient of the distances
                    grad_d *= dist_i; //this is the vector with the distance of node_i from the closest point on the free surface
                }
                else
                {
                    KRATOS_WATCH("norm grad is very small!!!!")
                    grad_d *= 0.0;
                }

                //extrapolate: p ~ grad(p) . (vector to the free surface), so p ~ 0 on the surface
                const array_1d<double, TDim>& press_grad = mXi[i_node]; //iii->FastGetSolutionStepValue (PRESS_PROJ);
                double pestimate = inner_prod (press_grad,grad_d);
                mPn1[i_node] = pestimate;

                // KRATOS_WATCH("peastimate step2")
                // KRATOS_WATCH(iii->Id())
                // KRATOS_WATCH(grad_d)
                // KRATOS_WATCH(press_grad)
                // KRATOS_WATCH(pestimate)
            }
            else
            {
                std::cout << "attention gradient of distance much greater than 1 on node:" << i_node <<std::endl;
                return_value = -1;
                // return -1;

                //fall back: average the pressure of the layer-0 neighbours
                double avg_number = 0.0;
                double pavg = 0.0;

                for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
                {
                    unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                    if( mis_visited[j_neighbour] == 1)
                    {
                        pavg += mPn1[j_neighbour];
                        avg_number += 1.0;
                    }
                }

                if (avg_number == 0)
                    KRATOS_THROW_ERROR (std::logic_error,"can not happen that the extrapolation node has no neighbours","");

                mPn1[i_node] = pavg/avg_number;
            }
        }

        //if a node is very close to the free surface (relatively to the element size) fix the pressure on it
        // for(ModelPart::NodesContainerType::iterator iii = mr_model_part.NodesBegin(); iii!=mr_model_part.NodesEnd(); iii++)
        // {
        // unsigned int i_node = iii->FastGetSolutionStepValue(AUX_INDEX);
        //
        // double dist = mdistances[i_node];
        // if(dist > 0.0 && dist < 0.01*mHavg[i_node])
        // iii->FastGetSolutionStepValue(PRESSURE) = 0.0;
        //
        // }

        //PREREQUISITES

        //allocate memory for variables
        ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
        int n_nodes = rNodes.size();
        //unknown and right-hand side vector
        TSystemVectorType dp, rhs;
        dp.resize (n_nodes);
        rhs.resize (n_nodes);
        array_1d<double, TDim> dU_i, dU_j, work_array;
        //read time step size from Kratos
        ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
        double delta_t = CurrentProcessInfo[DELTA_TIME];

#ifdef _OPENMP
        // double time_inv = 0.0; //1.0/delta_t;
        //read the pressure projection from the database
#endif
        // mr_matrix_container.FillOldScalarFromDatabase (PRESSURE, mPn, mr_model_part.Nodes() );
        // mr_matrix_container.FillScalarFromDatabase (PRESSURE, mPn1, mr_model_part.Nodes() );
        // mr_matrix_container.FillVectorFromDatabase (PRESS_PROJ, mXi, rNodes);
        // mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, rNodes);

        //for (int i_node = 0; i_node < n_nodes; i_node++)
        // std::cout << mvel_n1[i_node] << std::endl;

        //loop over all nodes: assemble the stabilized pressure Laplacian mL and the rhs
        // double rho_inv = 1.0 / mRho;
        #pragma omp parallel for
        for (int i_node = 0; i_node < n_nodes; i_node++)
        {
            double& rhs_i = rhs[i_node];
            rhs_i = 0.0;
            const double& p_i = mPn1[i_node];
            const double& p_old_i = mPn[i_node];
            const array_1d<double, TDim>& U_i_curr = mvel_n1[i_node];
            // const double& eps_i = mEps[i_node];
            array_1d<double, TDim>& xi_i = mXi[i_node];
            double l_ii = 0.0;
            // double div_i = 0.0;

            //loop over all neighbours
            for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
            {
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
                const double& p_j = mPn1[j_neighbour];
                const double& p_old_j = mPn[j_neighbour];
                const array_1d<double, TDim>& U_j_curr = mvel_n1[j_neighbour];
                const array_1d<double, TDim>& xi_j = mXi[j_neighbour];
                // const double& eps_j = mEps[j_neighbour];

                CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];

#ifdef SYMM_PRESS
                double edge_tau = 0.25* (mTauPressure[i_node] + mTauPressure[j_neighbour]);
#else
                double edge_tau = 0.5*mTauPressure[i_node];
#endif
                // double edge_tau = CalculateEdgeTau(time_inv,h_i,a_i,h_j,a_j);
                // if (edge_tau < delta_t) edge_tau=delta_t;

                //compute laplacian operator
                double sum_l_ikjk;
                edge_ij.CalculateScalarLaplacian (sum_l_ikjk);

                // double sum_l_ikjk_onlystab = sum_l_ikjk * (edge_tau);
                double sum_l_ikjk_onlydt = sum_l_ikjk * (delta_t);
                sum_l_ikjk *= (delta_t + edge_tau);

                //assemble right-hand side
                //pressure contribution
                // rhs_i -= sum_l_ikjk_onlystab * (p_j - p_i);
                rhs_i -= sum_l_ikjk * (p_j - p_i);
                rhs_i += sum_l_ikjk_onlydt * (p_old_j - p_old_i);

                //calculating the divergence of the fract vel
                // edge_ij.Sub_D_v(div_i, U_i_curr*mRho*eps_i, U_j_curr * mRho*eps_j);
                edge_ij.Sub_D_v (rhs_i, U_i_curr*mRho, U_j_curr * mRho);
                // edge_ij.Sub_D_v(rhs_i,a_i*rho_i,a_j*rho_i);

                //high order stabilizing term
                double temp = 0.0;
                // edge_ij.Add_div_v(temp,mTauPressure[i_node]*xi_i,mTauPressure[j_neighbour]*xi_j);
                edge_ij.Add_div_v (temp, xi_i, xi_j);
                rhs_i += edge_tau * temp;

                //assemble laplacian matrix
                mL (i_node, j_neighbour) = sum_l_ikjk;
                l_ii -= sum_l_ikjk;
            }

            // //area correction to prevent mass loss
            // rhs_i -= mdiv_error[i_node];
            // rhs_i += div_i * eps_i;

            mL (i_node, i_node) = l_ii;
        }

        if (muse_mass_correction == true)
        {
            #pragma omp parallel for
            for (int i_node = 0; i_node < n_nodes; i_node++)
            {
                double& rhs_i = rhs[i_node];
                rhs_i -= mdiv_error[i_node];
            }
        }

        //find the max diagonal term
        double max_diag = 0.0;
        for (int i_node = 0; i_node < n_nodes; i_node++)
        {
            double L_diag = mL (i_node, i_node);
            if (fabs (L_diag) > fabs (max_diag) ) max_diag = L_diag;
        }
        max_diag *= 1e10;
        // if (max_diag < 1e20) max_diag=1e20;

        //respect pressure boundary conditions by penalization
        // double huge = max_diag * 1e6;
        // for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++) {
        // unsigned int i_node = mPressureOutletList[i_pressure];
        // mL(i_node, i_node) = huge;
        // rhs[i_node] = 0.0;
        // }
        for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++)
        {
            unsigned int i_node = mPressureOutletList[i_pressure];
            mL (i_node, i_node) = max_diag;
            rhs[i_node] = 0.0;

            for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
            {
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
                mL (i_node, j_neighbour) = 0.0;
            }
        }

        //modification for level_set: decouple the nodes outside the fluid from the system
        // mr_matrix_container.FillScalarFromDatabase(DISTANCE, mdistances, mr_model_part.Nodes());
        // for (unsigned int i_dist = 0; i_dist < mdistances.size(); i_dist++)
        // {
        // if(mdistances[i_dist] >= 0)
        // {
        // mL(i_dist, i_dist) = huge;
        // rhs[i_dist] = 0.0;
        // }
        // }
        #pragma omp parallel for
        for (int i_node = 0; i_node < n_nodes; i_node++)
        {
            if (mdistances[i_node] >= 0)
            {
                mL (i_node, i_node) = max_diag;
                rhs[i_node] = 0.0;
                for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
                {
                    unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
                    mL (i_node, j_neighbour) = 0.0;
                }
            }
            else
            {
                for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
                {
                    unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
                    if (mdistances[j_neighbour] >= 0)
                        mL (i_node, j_neighbour) = 0.0;
                }
            }
        }

        // for (int i_node = 0; i_node < n_nodes; i_node++)
        // {
        // if( fabs(mL(i_node, i_node)) < 1e-20)
        // {
        // mL(i_node, i_node)=max_diag;
        // rhs[i_node] = 0.0;
        // KRATOS_WATCH("arghhhhhhhhhhhhhhhhhhhhhhhhhhhhhh");
        // }
        // }

        //compute row scaling factors (symmetric diagonal scaling of mL and rhs,
        //operating directly on the uBLAS CSR arrays)
        TSystemVectorType scaling_factors (n_nodes);
        double* Lvalues = mL.value_data().begin();
        SizeType* Lrow_indices = mL.index1_data().begin();
        SizeType* Lcol_indices = mL.index2_data().begin();

        #pragma omp parallel for
        for (int k = 0; k < static_cast< int> (mL.size1() ); k++)
        {
            double t = 0.0;
            SizeType col_begin = Lrow_indices[k];
            SizeType col_end = Lrow_indices[k+1];

            for (SizeType j=col_begin; j<col_end; j++)
                if ( static_cast<int> (Lcol_indices[j]) == k)
                {
                    t = fabs (Lvalues[j]);
                    break;
                }
            // t += Lvalues[j]*Lvalues[j];

            // t = sqrt(t);
            scaling_factors[k] = 1.0/sqrt (t);
        }

        #pragma omp parallel for
        for (int k = 0; k < static_cast<int> (mL.size1() ); k++)
        {
            SizeType col_begin = Lrow_indices[k];
            SizeType col_end = Lrow_indices[k+1];
            double k_factor = scaling_factors[k];

            rhs[k] *= k_factor;
            for (SizeType j=col_begin; j<col_end; j++)
            {
                Lvalues[j] *= scaling_factors[Lcol_indices[j]] * k_factor;
            }
        }

        //set starting vector for iterative solvers
        #pragma omp parallel for
        for (int i_node = 0; i_node < n_nodes; i_node++)
            dp[i_node] = 0.0;

        //KRATOS_WATCH(rhs);
        //solve linear equation system L dp = rhs
        pLinearSolver->Solve (mL, dp, rhs);
        //KRATOS_WATCH(*pLinearSolver)

        //update pressure (undo the row scaling on the solution)
        #pragma omp parallel for
        for (int i_node = 0; i_node < n_nodes; i_node++)
            mPn1[i_node] += dp[i_node]*scaling_factors[i_node];

        // for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++)
        // {
        // unsigned int i_node = mPressureOutletList[i_pressure];
        // mPn1[i_node] = mPressureOutlet[i_pressure];
        // }

        //write pressure and density to Kratos
        mr_matrix_container.WriteScalarToDatabase (PRESSURE, mPn1, rNodes);

        //compute pressure proj for the next step
        #pragma omp parallel for private(work_array)
        for (int i_node = 0; i_node < n_nodes; i_node++)
        {
            array_1d<double, TDim>& xi_i = mXi[i_node];
            for (unsigned int comp = 0; comp < TDim; comp++)
                xi_i[comp] = 0.0;

            double dist = mdistances[i_node];
            if (dist <= 0.0) //node is inside domain ---- if outside do nothing
            {
                const double& p_i = mPn1[i_node];

                for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
                {
                    //get global index of neighbouring node j
                    unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];

                    const double& p_j = mPn1[j_neighbour];

                    //projection of pressure gradients
                    CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];

                    edge_ij.Add_grad_p (xi_i, p_i, p_j);
                }

                const double& m_inv = mr_matrix_container.GetInvertedMass() [i_node];
                for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                    xi_i[l_comp] *= m_inv;
            }
        }

        mr_matrix_container.WriteVectorToDatabase (PRESS_PROJ, mXi, rNodes);

        // KRATOS_WATCH("end of step2")
        // KRATOS_WATCH(mPn)
        // KRATOS_WATCH(mPn1)
        // KRATOS_WATCH(mXi)
#ifdef DEBUG_OUTPUT
        KRATOS_WATCH("end of step2 - new")
        double aux_v=0.0;
        for (int i_node = 0; i_node < mvel_n1.size(); i_node++)
            aux_v += inner_prod(mvel_n1[i_node],mvel_n1[i_node]);
        double aux_xi=0.0;
        for (int i_node = 0; i_node < mvel_n1.size(); i_node++)
            aux_xi += inner_prod(mXi[i_node],mXi[i_node]);
        KRATOS_WATCH(inner_prod(mPn1,mPn1));
        KRATOS_WATCH(aux_v);
        KRATOS_WATCH(aux_xi);
#endif

        return return_value;

        KRATOS_CATCH ("")
    }

    //**********************************************************************************
    //function to solve fluid equations - fractional step 3: correct fractional momentum

    /// Fractional step 3: corrects the fractional-step velocity with the gradient of the
    /// pressure increment (p^{n+1} - p^n), applies the velocity boundary conditions,
    /// stores the nodal acceleration, and (if mass correction is enabled) recomputes the
    /// divergence error used by step 2. Writes the corrected velocity back to the database.
    void SolveStep3()
    {
        KRATOS_TRY

        //get number of nodes
        ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
        int n_nodes = rNodes.size();

        //define work array
        array_1d<double, TDim> correction;
        //read time step size from Kratos
        ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
        double delta_t = CurrentProcessInfo[DELTA_TIME];

        double factor = 0.5;
        if (massume_constant_dp == true)
            factor = 1.0;

        //compute end of step momentum
        double rho_inv = 1.0 / mRho;
        #pragma omp parallel for private(correction) firstprivate(delta_t,rho_inv,factor)
        for (int i_node = 0; i_node < n_nodes; i_node++)
        {
            double dist = mdistances[i_node];
            if (dist < 0.0) //node is inside domain ---- if outside do nothing
            {
                array_1d<double, TDim>& U_i_curr = mvel_n1[i_node];
                double delta_p_i = (mPn1[i_node] - mPn[i_node]) * rho_inv*factor;
                // const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];

                //setting to zero
                for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                    correction[l_comp] = 0.0;

                //compute edge contributions dt*M^(-1)Gp
                for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
                {
                    unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
                    double delta_p_j = (mPn1[j_neighbour] - mPn[j_neighbour]) * rho_inv*factor;

                    CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];

                    // edge_ij.Sub_grad_p(correction,delta_p_i,delta_p_j);
                    edge_ij.Sub_grad_p(correction, delta_p_i, delta_p_j);
                    // edge_ij.Add_grad_p(correction, delta_p_i, delta_p_j);

                    //edge_ij.Add_Gp (correction,delta_p_i,delta_p_j);
                    // edge_ij.Sub_Gp(correction,delta_p_i,delta_p_j);
                }
                //compute prefactor
                // double coefficient = delta_t * m_inv;
                const double m = mr_matrix_container.GetLumpedMass() [i_node];
                const double& d = mdiag_stiffness[i_node];

                //correct fractional momentum: dt/(m + dt*d) acts as the inverse of the
                //effective mass (lumped mass plus the porosity/Darcy diagonal stiffness)
                for (unsigned int comp = 0; comp < TDim; comp++)
                {
                    U_i_curr[comp] += delta_t / (m + delta_t*d) * correction[comp];
                }
            }
        }

        // //imit acceleration
        // #pragma omp parallel for
        // for(int i_node = 0; i_node < n_nodes; i_node++)
        // {
        // array_1d<double,TDim>& acc = macc[i_node];
        // array_1d<double,TDim>& v1 = mvel_n1[i_node];
        // array_1d<double,TDim>& v = mvel_n[i_node];
        //
        // for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        // acc[l_comp] = (v1[l_comp] - v[l_comp])/delta_t;
        //
        // //limit accelerations to a maximum=100m/s/2
        // const double max_acc = 200;
        // double acc_norm = norm_2(acc);
        // if(acc_norm > max_acc)
        // {
        // std::cout << "########################### acc norm " << acc_norm <<std::endl;
        //
        // acc *= max_acc/acc_norm;
        // v1 = v;
        // v1 += delta_t*acc;
        // }
        // }

        ApplyVelocityBC (mvel_n1);

        //save acceleration
        #pragma omp parallel for
        for(int i_node = 0; i_node < n_nodes; i_node++)
        {
            array_1d<double,TDim>& acc = macc[i_node];
            array_1d<double,TDim>& v1 = mvel_n1[i_node];
            array_1d<double,TDim>& v = mvel_n[i_node];

            for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                acc[l_comp] = (v1[l_comp] - v[l_comp])/delta_t;
        }

        //write velocity of time step n+1 to Kratos

        //calculate the error on the divergence
        if (muse_mass_correction == true)
        {
            #pragma omp parallel for private(correction) firstprivate(delta_t,rho_inv)
            for (int i_node = 0; i_node < n_nodes; i_node++)
            {
                const double dist = mdistances[i_node];
                double& div_i_err = mdiv_error[i_node];
                div_i_err = 0.0;
                if (dist < 0.0) //node is inside domain ---- if outside do nothing
                {
                    const array_1d<double, TDim>& U_i_curr = mvel_n1[i_node];

                    //compute edge contributions dt*M^(-1)Gp
                    for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
                    {
                        unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
                        array_1d<double, TDim>& U_j_curr = mvel_n1[j_neighbour];

                        CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];

                        edge_ij.Add_D_v (div_i_err, U_i_curr*mRho, U_j_curr * mRho);
                    }
                }
            }
        }

#ifdef DEBUG_OUTPUT
        KRATOS_WATCH("end of step 3")
        double aux=0.0;
        for (int i_node = 0; i_node < n_nodes; i_node++)
            aux += inner_prod(mvel_n1[i_node],mvel_n1[i_node]);
        KRATOS_WATCH(inner_prod(mPn1,mPn1));
        KRATOS_WATCH(aux);
#endif

        mr_matrix_container.WriteVectorToDatabase (VELOCITY, mvel_n1, rNodes);

        KRATOS_CATCH ("")
    }

    /// Imposes prescribed distance-function values: each node in mDistanceBoundaryList
    /// gets the matching entry of mDistanceValuesList written into mdistances.
    void ApplyDistanceBC()
    {
        KRATOS_TRY

        //slip condition
        int size = mDistanceBoundaryList.size();

        #pragma omp parallel for firstprivate(size)
        for (int i_dist = 0; i_dist < size; i_dist++)
        {
            unsigned int i_node = mDistanceBoundaryList[i_dist];
            double& dist = mdistances[i_node];
            dist = mDistanceValuesList[i_dist];
        }

        //fix the distance if velocity goes inwards
        // int slip_size = mSlipBoundaryList.size();
        // #pragma omp parallel for firstprivate(slip_size)
        // for (int i_slip = 0; i_slip < slip_size; i_slip++)
        // {
        // unsigned int i_node = mSlipBoundaryList[i_slip];
        // double dist = mphi_n[i_node];
        // // if(dist > 0.0)
        // // {
        // array_1d<double, TDim>& U_i = mvel_n1[i_node];
        // array_1d<double, TDim>& an_i = mSlipNormal[i_node];
        // double projection_length = 0.0;
        // double normalization = 0.0;
        // for (unsigned int comp = 0; comp < TDim; comp++)
        // {
        // projection_length += U_i[comp] * an_i[comp];
        // }
        // if(projection_length > 0.0)
        // dist = mphi_n[i_node];
        //
        // // }
        // }
        KRATOS_CATCH ("")
    }

    //************************************
    void ApplyVelocityBC (CalcVectorType&
VelArray) { KRATOS_TRY // if(mWallLawIsActive == false) // { // std::cout << "applying corners condition" << std::endl; // apply conditions on corner edges // int edge_size = medge_nodes_direction.size(); // #pragma omp parallel for firstprivate(edge_size) // for (int i = 0; i < edge_size; i++) // { // int i_node = medge_nodes[i]; // const array_1d<double, TDim>& direction = medge_nodes_direction[i]; // double dist = mdistances[i_node]; // // if(dist <= 0.0) // { // array_1d<double, TDim>& U_i = VelArray[i_node]; // // for (unsigned int comp = 0; comp < TDim; comp++) // // U_i[comp] = 0.0; // // double temp=0.0; // for (unsigned int comp = 0; comp < TDim; comp++) // temp += U_i[comp] * direction[comp]; // // for (unsigned int comp = 0; comp < TDim; comp++) // U_i[comp] = direction[comp]*temp; // } // } // // // // //apply conditions on corners // int corner_size = mcorner_nodes.size(); // for (int i = 0; i < corner_size; i++) // { // int i_node = mcorner_nodes[i]; // // array_1d<double, TDim>& U_i = VelArray[i_node]; // for (unsigned int comp = 0; comp < TDim; comp++) // U_i[comp] = 0.0; // } // //apply conditions on corners int corner_size = mcorner_nodes.size(); for (int i = 0; i < corner_size; i++) { int i_node = mcorner_nodes[i]; array_1d<double, TDim>& U_i = VelArray[i_node]; // if(mdistances[i_node] <= 0.0) // { array_1d<double, TDim> aux; for (unsigned int comp = 0; comp < TDim; comp++) aux[comp] = 0.0; double counter = 0.0; for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++) { //get global index of neighbouring node j unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; const double& dist_j = mdistances[j_neighbour]; array_1d<double, TDim>& vj = VelArray[j_neighbour]; if(dist_j <= 0 && mis_slip[j_neighbour] == false) { counter += 1.0; for (unsigned int comp = 0; comp < TDim; comp++) aux[comp] += vj[comp]; } } if(counter != 0.0) for 
(unsigned int comp = 0; comp < TDim; comp++) U_i[comp] = aux[comp]/counter; // } } // } //slip condition int slip_size = mSlipBoundaryList.size(); #pragma omp parallel for firstprivate(slip_size) for (int i_slip = 0; i_slip < slip_size; i_slip++) { unsigned int i_node = mSlipBoundaryList[i_slip]; double dist = mdistances[i_node]; if (dist <= 0.0) { array_1d<double, TDim>& U_i = VelArray[i_node]; array_1d<double, TDim>& an_i = mSlipNormal[i_node]; double projection_length = 0.0; double normalization = 0.0; for (unsigned int comp = 0; comp < TDim; comp++) { projection_length += U_i[comp] * an_i[comp]; normalization += an_i[comp] * an_i[comp]; } projection_length /= normalization; //tangential momentum as difference between original and normal momentum for (unsigned int comp = 0; comp < TDim; comp++) U_i[comp] -= projection_length * an_i[comp]; } } // //loop over all faces // ValuesVectorType vel_correction(VelArray.size()); // // CalcVectorType slip_area(VelArray.size()); // int iterations = 10; // for(unsigned int i=0;i<iterations; i++) // { // mr_matrix_container.SetToZero(vel_correction); // // mr_matrix_container.SetToZero(slip_area); // for (ModelPart::ConditionsContainerType::iterator cond_it = mr_model_part.ConditionsBegin(); cond_it != mr_model_part.ConditionsEnd(); cond_it++) // { // if (static_cast<bool>(cond_it->GetValue(IS_STRUCTURE)) == true) // { // //get geometry data of the face // Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry(); // // //reference for area normal of the face // array_1d<double, 3 > & face_normal = cond_it->GetValue(NORMAL); // double n_area = norm_2(face_normal) / static_cast<double>(TDim); // // unsigned int i_node0 = static_cast<unsigned int> (face_geometry[0].FastGetSolutionStepValue(AUX_INDEX)); // unsigned int i_node1 = static_cast<unsigned int> (face_geometry[1].FastGetSolutionStepValue(AUX_INDEX)); // unsigned int i_node2 = static_cast<unsigned int> (face_geometry[2].FastGetSolutionStepValue(AUX_INDEX)); // // 
const array_1d<double, TDim>& U_0 = VelArray[i_node0]; // const array_1d<double, TDim>& U_1 = VelArray[i_node1]; // const array_1d<double, TDim>& U_2 = VelArray[i_node2]; // // double vn0=0.0; // double vn1=0.0; // double vn2=0.0; // if(mdistances[i_node0] <= 0 && face_geometry[0].IsFixed(VELOCITY_X) == false) vn0 = inner_prod(U_0,face_normal); // if(mdistances[i_node1] <= 0 && face_geometry[1].IsFixed(VELOCITY_X) == false) vn1 = inner_prod(U_1,face_normal); // if(mdistances[i_node2] <= 0 && face_geometry[2].IsFixed(VELOCITY_X) == false) vn2 = inner_prod(U_2,face_normal); // // double edge01 = 0.5*(vn0+vn1)*0.333333333333333333333333333333*0.5; // double edge02 = 0.5*(vn0+vn2)*0.333333333333333333333333333333*0.5; // double edge12 = 0.5*(vn2+vn2)*0.333333333333333333333333333333*0.5; // // vel_correction[i_node0] += edge01 + edge02; // vel_correction[i_node1] += edge01 + edge12; // vel_correction[i_node2] += edge02 + edge12; // // /* double tmp = 0.333333333333333333333333333333333*0.333333333333333333333333333333333*(vn0+vn1+vn2); // vel_correction[i_node0] += tmp; // vel_correction[i_node1] += tmp; // vel_correction[i_node2] += tmp; */ // } // } // // //slip condition // int slip_size = mSlipBoundaryList.size(); // #pragma omp parallel for firstprivate(slip_size) // for (int i_slip = 0; i_slip < slip_size; i_slip++) // { // unsigned int i_node = mSlipBoundaryList[i_slip]; // double dist = mdistances[i_node]; // if (dist <= 0.0) // { // array_1d<double, TDim>& U_i = VelArray[i_node]; // array_1d<double, TDim>& an_i = mSlipNormal[i_node]; // double normalization = 0.0; // for (unsigned int comp = 0; comp < TDim; comp++) // { // normalization += an_i[comp] * an_i[comp]; // } // //tangential momentum as difference between original and normal momentum // double coeff = vel_correction[i_node] / normalization; // for (unsigned int comp = 0; comp < TDim; comp++) // U_i[comp] += coeff * an_i[comp]; // } // } // } //fixed condition int fixed_size = mFixedVelocities.size(); 
#pragma omp parallel for firstprivate(fixed_size) for (int i_velocity = 0; i_velocity < fixed_size; i_velocity++) { unsigned int i_node = mFixedVelocities[i_velocity]; double dist = mdistances[i_node]; if (dist <= 0.0) { const array_1d<double, TDim>& u_i_fix = mFixedVelocitiesValues[i_velocity]; array_1d<double, TDim>& u_i = VelArray[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) u_i[comp] = u_i_fix[comp]; } } KRATOS_CATCH ("") } //******************************** //function to compute coefficients void ExtrapolateValues (unsigned int extrapolation_layers) { KRATOS_TRY //ensure that corner nodes are wet if all of the nodes around them have a negative distance // typedef Node < 3 > PointType; // typedef PointerVector<PointType > PointVector; // typedef PointVector::iterator PointIterator; mr_matrix_container.FillScalarFromDatabase (DISTANCE, mdistances,mr_model_part.Nodes() ); #pragma omp parallel for for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++) mis_visited[i_node] = 0.0; boost::numeric::ublas::vector<int> layers(mr_model_part.Nodes().size(),-1); // std::vector<int> layer_color(mr_model_part.Nodes().size(),-1000); boost::numeric::ublas::vector<int> layer_limits(extrapolation_layers+1); layer_limits[0] = 0; int layer_counter = -1; #pragma omp parallel for for (int i_node = 0; i_node < static_cast<int>( mr_model_part.Nodes().size()); i_node++) { if(mdistances[i_node] < 0.0) { for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++) { //get global index of neighbouring node j unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; if(mdistances[j_neighbour] >= 0.0 && mis_visited[i_node] == 0) { #pragma omp critical layers[++layer_counter] = i_node; mis_visited[i_node] = 1; break; } } } else { mvel_n1[i_node] = ZeroVector (TDim); mvel_n[i_node] = ZeroVector (TDim); mPn[i_node] = 0.0; mPn1[i_node] = 
0.0; mXi[i_node] = ZeroVector (TDim); } } layer_limits[1] = layer_counter; //fill the following layers by neighbour relationships //each layer fills the following for (unsigned int il = 0; il < extrapolation_layers - 1; il++) { //parallelization not trivial for(unsigned int iii = static_cast<unsigned int>(layer_limits[il]); iii<static_cast<unsigned int>(layer_limits[il+1]); iii++) { unsigned int i_node = layers[iii]; for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++) { unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; if(mdistances[j_neighbour] >= 0.0 && mis_visited[j_neighbour] == 0) { layers[layer_counter++] = j_neighbour; mis_visited[j_neighbour] = il+2; } } } layer_limits[il+2] = layer_counter; } array_1d<double, TDim > aux, aux_proj; //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); //double delta_t = CurrentProcessInfo[DELTA_TIME]; //fill the pressure projection on the first layer inside the fluid //by extrapolating from the pressure projection on the layer -1 (the first layer completely inside the domain) #pragma omp parallel for for(int i=layer_limits[0]; i<layer_limits[1]; i++) { unsigned int i_node = layers[i]; noalias (aux_proj) = ZeroVector (TDim); double avg_number = 0.0; for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++) { unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; if( mis_visited[j_neighbour] == 0) { const array_1d<double, TDim > & inside_press_grad = mXi[j_neighbour]; noalias (aux_proj) += inside_press_grad; avg_number += 1.0; } } if (avg_number != 0.0) //this case means that it has some neighbours that are completely internal { aux_proj /= avg_number; noalias (mXi[i_node] ) = aux_proj; } else //case in which there is not a layer of nodes completely internal { 
array_1d<double,TDim>& xi = mXi[i_node]; noalias ( xi ) = mRho*mBodyForce; noalias ( xi ) -= mRho*macc[i_node]; } } //perform extrapolation layer by layer by making an average //of the neighbours of lower order /* KRATOS_WATCH(extrapolation_layers) for (unsigned int il = 0; il < extrapolation_layers; il++) std::cout << layer_limits[il] << " "; std::cout << std::endl; std::cout << std::endl; for (unsigned int il = 0; il < extrapolation_layers; il++) { std::cout << "level = " << il << " nneighb = " << layer_limits[il+1] - layer_limits[il] << " -- "; for(unsigned int iii = layer_limits[il]; iii<layer_limits[il+1]; iii++) std::cout << layers[iii] << " "; std::cout << std::endl; } std::cout << std::endl; std::cout << " printing is visited " << std::endl; for (unsigned int i_node = 0; i_node < mr_model_part.Nodes().size(); i_node++) std::cout << mis_visited[i_node] << std::endl; std::cout << std::endl;*/ for (int il = 1; il < static_cast<int>(extrapolation_layers); il++) { //parallelization of this loop not trivial for(int iii = layer_limits[il]; iii<layer_limits[il+1]; iii++) { unsigned int i_node = layers[iii]; noalias (aux) = ZeroVector (TDim); noalias (aux_proj) = ZeroVector (TDim); double avg_number = 0.0; double pavg = 0.0; for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++) { unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; if (mis_visited[j_neighbour] < (il + 1) && mis_visited[j_neighbour] != 0) { const array_1d<double, TDim >& direction_vec = mEdgeDimensions[csr_index]; // noalias (direction_vec) -= coords_bottom; const array_1d<double, TDim >& press_grad = mXi[j_neighbour]; //i->FastGetSolutionStepValue (PRESS_PROJ); double temp = inner_prod (direction_vec, press_grad); double pestimate = mPn[j_neighbour] + temp; pavg += pestimate; noalias (aux_proj) += press_grad; noalias (aux) += mvel_n1[j_neighbour]; //i->FastGetSolutionStepValue 
(VELOCITY); avg_number += 1.0; } } if (avg_number != 0.0) { aux /= avg_number; pavg /= avg_number; aux_proj /= avg_number; // KRATOS_WATCH(avg_number); // KRATOS_WATCH(aux); // KRATOS_WATCH(pavg); // KRATOS_WATCH(aux_proj); } else { KRATOS_THROW_ERROR (std::runtime_error, "error in extrapolation:: no neighbours find on a extrapolation layer -- impossible", ""); // KRATOS_THROW_ERROR(std:logic_error,"error in extrapolation:: no neighbours find on a extrapolation layer -- impossible",""); } mvel_n1[i_node] = aux; mvel_n[i_node] = aux; mPn[i_node] = pavg; // mPn1[i_node] = pavg; mXi[i_node] = aux_proj; } } //mark nodes on which we will have to solve for convection //mark all of internal nodes #pragma omp parallel for for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++) { if (mdistances[i_node] <= 0.0) mis_visited[i_node] = 1.0; else mis_visited[i_node] = 0.0; } //now mark all of the nodes up to the extrapolation layers - 1 for (unsigned int il = 0; il < extrapolation_layers-1; il++) { #pragma omp parallel for for( int iii = static_cast<int>(layer_limits[il]); iii<static_cast<int>(layer_limits[il+1]); iii++) { unsigned int i_node = layers[iii]; mis_visited[i_node] = 1.0; } } ApplyVelocityBC (mvel_n1); // mr_matrix_container.WriteVectorToDatabase (VELOCITY, mvel_n1, mr_model_part.Nodes() ); // KRATOS_WATCH("end of Extrapolate Values ") // KRATOS_WATCH(mvel_n1) // KRATOS_WATCH(mPn) // KRATOS_WATCH(mPn1) // KRATOS_WATCH(mXi) // KRATOS_WATCH(mdistances) #ifdef DEBUG_OUTPUT KRATOS_WATCH("end of extrapolate values - new") double aux_v=0.0; for (int i_node = 0; i_node < mvel_n1.size(); i_node++) aux_v += inner_prod(mvel_n1[i_node],mvel_n1[i_node]); double aux_xi=0.0; for (int i_node = 0; i_node < mvel_n1.size(); i_node++) aux_xi += inner_prod(mXi[i_node],mXi[i_node]); KRATOS_WATCH(inner_prod(mPn1,mPn1)); KRATOS_WATCH(aux_v); KRATOS_WATCH(aux_xi); #endif KRATOS_CATCH ("") } void ChangeSignToDistance() { KRATOS_TRY for 
(ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) { double dist = inode->FastGetSolutionStepValue (DISTANCE); inode->FastGetSolutionStepValue (DISTANCE) = -dist; } KRATOS_CATCH ("") } void MarkNodesByDistance (double min, double max) { KRATOS_TRY #pragma omp parallel for for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++) { double& dist = mdistances[i_node]; if ( dist > min && dist < max ) mis_visited[i_node] = 1.0; else mis_visited[i_node] = 0.0; } KRATOS_CATCH ("") } void SaveScalarVariableToOldStep (Variable<double>& rVar) { KRATOS_TRY for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) { inode->FastGetSolutionStepValue (rVar, 1) = inode->FastGetSolutionStepValue (rVar); } KRATOS_CATCH ("") } void MarkExternalAndMixedNodes() { KRATOS_TRY #pragma omp parallel for for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++) mis_visited[i_node] = 0; for (unsigned int i_node = 0; i_node < mr_model_part.Nodes().size(); i_node++) { if(mdistances[i_node] > 0.0) { mis_visited[i_node] = 1; for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++) { unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; mis_visited[j_neighbour] = 1; } } } KRATOS_CATCH ("") } void MarkInternalAndMixedNodes() { KRATOS_TRY #pragma omp parallel for for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++) mis_visited[i_node] = 0; for (unsigned int i_node = 0; i_node < mr_model_part.Nodes().size(); i_node++) { if(mdistances[i_node] <= 0.0) { mis_visited[i_node] = 1; for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++) { unsigned int 
j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; mis_visited[j_neighbour] = 1; } } } KRATOS_CATCH ("") } void MarkInternalNodes() { KRATOS_TRY #pragma omp parallel for for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++) { if(mdistances[i_node] <= 0.0) mis_visited[i_node] = 1; else mis_visited[i_node] = 0; } KRATOS_CATCH ("") } //************************************** //function to calculate the area normals void CalculateNormals (ModelPart::ConditionsContainerType& rConditions) { KRATOS_TRY //calculate area normals face-by-face array_1d<double, 3 > area_normal; //2D case if (TDim == 2) { for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++) CalculateNormal2D (cond_it, area_normal); }//3D case else if (TDim == 3) { //help vectors for cross product array_1d<double, 3 > v1; array_1d<double, 3 > v2; for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++) CalculateNormal3D (cond_it, area_normal, v1, v2); } // area_normal *= -1; //CHAPUZA: REMOVE!!!s //(re)initialize normals unsigned int n_nodes = mNodalFlag.size(); mInOutNormal.resize (n_nodes); mSlipNormal.resize (n_nodes); for (unsigned int i_node = 0; i_node < n_nodes; i_node++) { noalias (mSlipNormal[i_node]) = ZeroVector (TDim); mis_slip[i_node] = false; noalias (mInOutNormal[i_node]) = ZeroVector (TDim); } //loop over all faces const double node_factor = 1.0 / TDim; for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++) { //get geometry data of the face Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry(); //reference for area normal of the face array_1d<double, 3 > & face_normal = cond_it->GetValue (NORMAL); //slip condition if (static_cast<bool> (cond_it->GetValue (IS_STRUCTURE) ) == true) for (unsigned int if_node = 0; if_node < TDim; if_node++) { 
unsigned int i_node = static_cast<unsigned int> (face_geometry[if_node].FastGetSolutionStepValue (AUX_INDEX) ); array_1d<double, TDim>& slip_normal = mSlipNormal[i_node]; mis_slip[i_node] = true; for (unsigned int comp = 0; comp < TDim; comp++) { slip_normal[comp] += node_factor * face_normal[comp]; } } } //fill the list of slip nodes std::vector< unsigned int> tempmSlipBoundaryList; for (unsigned int i_node = 0; i_node < n_nodes; i_node++) { if (mis_slip[i_node] == true) tempmSlipBoundaryList.push_back (i_node); mis_slip[i_node] = false; } mSlipBoundaryList.resize (tempmSlipBoundaryList.size(),false); #pragma omp parallel for for (int i=0; i<static_cast<int> (tempmSlipBoundaryList.size() ); i++) mSlipBoundaryList[i] = tempmSlipBoundaryList[i]; //check that all of the normals are not zero for (int i=0; i<static_cast<int> (mSlipBoundaryList.size() ); i++) { unsigned int i_node = mSlipBoundaryList[i]; double tmp = norm_2(mSlipNormal[i_node]); if(tmp < 1e-20) KRATOS_THROW_ERROR(std::logic_error,"found a slip node with zero normal on node with id",i_node+1) } //loop over all faces to fill inlet outlet for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++) { //get geometry data of the face Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry(); //reference for area normal of the face array_1d<double, 3 > & face_normal = cond_it->GetValue (NORMAL); bool is_inlet_or_outlet = false; if (cond_it->GetValue (IS_STRUCTURE) == 0) is_inlet_or_outlet = true; else { for (unsigned int if_node = 0; if_node < TDim; if_node++) if (face_geometry[if_node].IsFixed (VELOCITY_X) ) is_inlet_or_outlet = true; } //slip condition if (is_inlet_or_outlet) //the opposite of the loop before for (unsigned int if_node = 0; if_node < TDim; if_node++) { unsigned int i_node = static_cast<unsigned int> (face_geometry[if_node].FastGetSolutionStepValue (AUX_INDEX) ); array_1d<double, TDim>& inout_normal = mInOutNormal[i_node]; 
mis_slip[i_node] = true; //reutilize it! for (unsigned int comp = 0; comp < TDim; comp++) { inout_normal[comp] += node_factor * face_normal[comp]; } } } // KRATOS_WATCH( mInOutNormal[7-1] ); // KRATOS_THROW_ERROR(std::logic_error,"remove line 2318 " ,""); std::vector< unsigned int> tempmInOutBoundaryList; for (unsigned int i_node = 0; i_node < n_nodes; i_node++) { if (mis_slip[i_node] == true) tempmInOutBoundaryList.push_back (i_node); } mInOutBoundaryList.resize (tempmInOutBoundaryList.size(),false); #pragma omp parallel for for (int i=0; i<static_cast<int> (tempmInOutBoundaryList.size() ); i++) mInOutBoundaryList[i] = tempmInOutBoundaryList[i]; //store for future use the list of slip nodes #pragma omp parallel for for (int i=0; i<static_cast<int> (mis_slip.size() ); i++) mis_slip[ i ] = false; #pragma omp parallel for for (int i=0; i<static_cast<int> (mSlipBoundaryList.size() ); i++) mis_slip[ mSlipBoundaryList[i] ] = true; KRATOS_CATCH ("") } //******************************* //function to free dynamic memory void Clear() { KRATOS_TRY mViscosity.clear(); mWork.clear(); mvel_n.clear(); mvel_n1.clear(); mPn.clear(); mPn1.clear(); mHmin.clear(); mHavg.clear(); mSlipNormal.clear(); mNodalFlag.clear(); mFixedVelocities.clear(); mFixedVelocitiesValues.clear(); mPressureOutletList.clear(); // mPressureOutlet.clear(); mSlipBoundaryList.clear(); mL.clear(); mTauPressure.clear(); mTauConvection.clear(); mTau2.clear(); mBeta.clear(); mPiConvection.clear(); mphi_n.clear(); mphi_n1.clear(); mEps.clear(); // mD.clear(); mA.clear(); mB.clear(); mdiv_error.clear(); mWallReductionFactor.clear(); mdiag_stiffness.clear(); mis_slip.clear(); mis_visited.clear(); macc.clear(); KRATOS_CATCH ("") } void ConvectDistance() { KRATOS_TRY //variables for node based data handling ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes(); int n_nodes = rNodes.size(); //storage of nodal values in local variables ValuesVectorType rhs, WorkConvection; rhs.resize (n_nodes); 
WorkConvection.resize (n_nodes); ValuesVectorType active_nodes; active_nodes.resize (n_nodes); // mr_matrix_container.FillScalarFromDatabase (POROSITY, mEps, mr_model_part.Nodes() ); //read variables from Kratos // mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, mr_model_part.Nodes() ); // mr_matrix_container.FillOldVectorFromDatabase (VELOCITY, mvel_n, mr_model_part.Nodes() ); mr_matrix_container.FillScalarFromDatabase (DISTANCE, mphi_n1, mr_model_part.Nodes() ); mr_matrix_container.FillOldScalarFromDatabase (DISTANCE, mphi_n, mr_model_part.Nodes() ); //get the "fresh" values to be fixed_size for (unsigned int i=0; i< mDistanceValuesList.size(); i++) { mDistanceValuesList[ i ] = mphi_n1[ mDistanceBoundaryList[i] ]; } //mr_matrix_container.AssignVectorToVector(mphi_n1, mphi_n); //mWork = mphi_n // //chapuza // //set the distance to zero when it tries to go out of the pressure boundary // int pressure_size = mPressureOutletList.size(); // #pragma omp parallel for firstprivate(pressure_size) // for (int iii = 0; iii < pressure_size; iii++) // { // unsigned int i_node = mPressureOutletList[iii]; // mphi_n1[i_node] = fabs(mphi_n1[i_node]); // mphi_n[i_node] = fabs(mphi_n[i_node]); // } //create and fill a vector of nodes for which we want to convect the velocity for (int i_node = 0; i_node < n_nodes; i_node++) { active_nodes[i_node] = mis_visited[i_node]; } // ComputeConvectiveProjection(mPiConvection,mphi_n1,mEps,mvel_n1); // ComputeLimitor(mPiConvection,mphi_n1,mBeta,mvel_n1,mEdgeDimensions); // mr_matrix_container.WriteScalarToDatabase(TEMPERATURE, active_nodes, rNodes); //read time step size from Kratos ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); double delta_t = CurrentProcessInfo[DELTA_TIME]; double n_substeps = mnumsubsteps; // del double delta_t_substep = delta_t/n_substeps; for (unsigned int substep = 0; substep<n_substeps; substep++) { mr_matrix_container.AssignVectorToVector (mphi_n, WorkConvection); //mWork = mphi_n 
//first step of Runge Kutta // mr_matrix_container.AssignVectorToVector(mphi_n,mphi_n1); //mphi_n1 = mphi_n mr_matrix_container.SetToZero (rhs); ComputeConvectiveProjection (mPiConvection,mphi_n1,mEps,mvel_n1); ComputeLimitor (mPiConvection,mphi_n1,mBeta,mvel_n1,mEdgeDimensions); CalculateRHS_convection (mphi_n1, mvel_n1, rhs, active_nodes); mr_matrix_container.Add_Minv_value (WorkConvection, WorkConvection, delta_t_substep / 6.0, mr_matrix_container.GetInvertedMass(), rhs); mr_matrix_container.Add_Minv_value (mphi_n1, mphi_n, 0.5 * delta_t_substep, mr_matrix_container.GetInvertedMass(), rhs); ApplyDistanceBC(); //second step mr_matrix_container.SetToZero (rhs); ComputeConvectiveProjection (mPiConvection,mphi_n1,mEps,mvel_n1); ComputeLimitor (mPiConvection,mphi_n1,mBeta,mvel_n1,mEdgeDimensions); CalculateRHS_convection (mphi_n1, mvel_n1, rhs, active_nodes); mr_matrix_container.Add_Minv_value (WorkConvection, WorkConvection, delta_t_substep / 3.0, mr_matrix_container.GetInvertedMass(), rhs); mr_matrix_container.Add_Minv_value (mphi_n1, mphi_n, 0.5 * delta_t_substep, mr_matrix_container.GetInvertedMass(), rhs); ApplyDistanceBC(); //third step mr_matrix_container.SetToZero (rhs); ComputeConvectiveProjection (mPiConvection,mphi_n1,mEps,mvel_n1); ComputeLimitor (mPiConvection,mphi_n1,mBeta,mvel_n1,mEdgeDimensions); CalculateRHS_convection (mphi_n1, mvel_n1, rhs, active_nodes); mr_matrix_container.Add_Minv_value (WorkConvection, WorkConvection, delta_t_substep / 3.0, mr_matrix_container.GetInvertedMass(), rhs); mr_matrix_container.Add_Minv_value (mphi_n1, mphi_n, delta_t_substep, mr_matrix_container.GetInvertedMass(), rhs); ApplyDistanceBC(); //fourth step mr_matrix_container.SetToZero (rhs); ComputeConvectiveProjection (mPiConvection,mphi_n1,mEps,mvel_n1); ComputeLimitor (mPiConvection,mphi_n1,mBeta,mvel_n1,mEdgeDimensions); CalculateRHS_convection (mphi_n1, mvel_n1, rhs, active_nodes); mr_matrix_container.Add_Minv_value (WorkConvection, WorkConvection, delta_t_substep 
/ 6.0, mr_matrix_container.GetInvertedMass(), rhs); ApplyDistanceBC(); //compute right-hand side mr_matrix_container.AssignVectorToVector (WorkConvection, mphi_n1); mr_matrix_container.AssignVectorToVector (mphi_n1, mphi_n); } // // make sure that boundary nodes that are very close to the free surface get wet // int slip_size = mSlipBoundaryList.size(); // #pragma omp parallel for firstprivate(slip_size) // for (int i_slip = 0; i_slip < slip_size; i_slip++) { // unsigned int i_node = mSlipBoundaryList[i_slip]; // const double& h_i = mHmin[i_node]; // double& dist_i = mphi_n1[i_node]; // // if(dist_i > 0.0 && dist_i < 0.5*h_i) // { // //loop to all the edges surrounding node I // for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++) // { // unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; // if(mphi_n1[j_neighbour] <= 0.0) // dist_i = -0.01 * h_i; // } // } // // } // int fixed_size = mFixedVelocities.size(); // #pragma omp parallel for firstprivate(fixed_size) // for (int i_velocity = 0; i_velocity < fixed_size; i_velocity++) { // unsigned int i_node = mFixedVelocities[i_velocity]; // const double& h_i = mHmin[i_node]; // double& dist_i = mphi_n1[i_node]; // // if(dist_i > 0.0 && dist_i < 0.5*h_i) // { // //loop to all the edges surrounding node I // for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++) // { // unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; // if(mphi_n1[j_neighbour] <= 0.0) // dist_i = -0.01 * h_i; // } // } // } //wetten corner nodes if needed int corner_size = mcorner_nodes.size(); for (int i = 0; i < corner_size; i++) { int i_node = mcorner_nodes[i]; bool to_be_wettened = true; double min_dist = 0.0; for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index 
!= mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++) { unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index]; double neighb_dist = mphi_n1[j_neighbour]; if (min_dist > neighb_dist) min_dist = neighb_dist; if (neighb_dist >= 0.0) { to_be_wettened=false; } } if (to_be_wettened==true) mphi_n1[i_node] = min_dist; } mr_matrix_container.WriteScalarToDatabase (DISTANCE, mphi_n1, mr_model_part.Nodes() ); KRATOS_CATCH ("") } void ReduceTimeStep (ModelPart& rModelPart, double NewTime) { KRATOS_TRY /* double current_time = rModelPart.GetProcessInfo()[TIME]; double current_delta_time = rModelPart.GetProcessInfo()[DELTA_TIME]; double old_time = current_time - current_delta_time; double new_reduced_time = NewTtime; double new_delta_time = new_reduced_time - old_time; rModelPart.GetProcessInfo()[TIME] = new_reduced_time; rModelPart.GetProcessInfo()[DELTA_TIME] = new_delta_time; //now copy the database from the old step on the top of the current step int step_data_size = ThisModelPart.GetNodalSolutionStepDataSize(); double* current_data = (pnode)->SolutionStepData().Data(0); double* old_data = (pnode)->SolutionStepData().Data(1); for (int j = 0; j < step_data_size; j++) current_data[j] = old_data[j]; */ rModelPart.OverwriteSolutionStepData (1, 0); rModelPart.GetProcessInfo().SetCurrentTime (NewTime); KRATOS_CATCH ("error in reducing the time step") } bool CheckDistanceConvection() { int n_large_distance_gradient = 0; array_1d<double, TDim> grad_d; ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes(); int n_nodes = rNodes.size(); //calculate gradient of distance on the nodes and count occurrences of large gradients (that indicate a failure) for (int i_node = 0; i_node < n_nodes; i_node++) { double dist = mdistances[i_node]; if (dist <= 0.0) { for (unsigned int comp = 0; comp < TDim; comp++) grad_d[comp] = 0.0; double dist_i = mdistances[i_node]; for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != 
mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++) { //get global index of neighbouring node j unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index]; const double& dist_j = mdistances[j_neighbour]; //projection of pressure gradients CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index]; edge_ij.Add_grad_p (grad_d, dist_i, dist_j); } const double& m_inv = mr_matrix_container.GetInvertedMass() [i_node]; for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) grad_d[l_comp] *= m_inv; double norm_grad = norm_2 (grad_d); if (norm_grad > 1.5) //large gradient found n_large_distance_gradient += 1; } } if (n_large_distance_gradient != 0) { bool success = false; return success; } else { bool success = true; return success; } } void ActivateWallResistance (double Ywall) { mWallLawIsActive = true; mY_wall = Ywall; double max_angle_overall = 0.0; //compute wall reduction factor //slip condition int slip_size = mSlipBoundaryList.size(); #pragma omp parallel for firstprivate(slip_size) for (int i_slip = 0; i_slip < slip_size; i_slip++) { unsigned int i_node = mSlipBoundaryList[i_slip]; /* const array_1d<double, TDim>& an_i = mSlipNormal[i_node]; double AI = norm_2(an_i); array_1d<double,TDim> nI = an_i/AI; double min_dot_prod = 1.0; for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++) { unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; const array_1d<double, TDim>& an_j = mSlipNormal[j_neighbour]; double AJ = norm_2(an_j); if(AJ > 1e-20) //...a slip node! 
{ double tmp = 0.0; for (unsigned int comp = 0; comp < TDim; comp++) tmp += nI[comp] * an_j[comp]; tmp /= AJ; tmp = fabs(tmp); if(tmp < min_dot_prod) min_dot_prod = tmp; } } double max_angle = acos(min_dot_prod); // max_angle *= 2.0; // if(max_angle > 3.1415926*0.5) max_angle = 3.1415926*0.5; if(max_angle > max_angle_overall) max_angle_overall = max_angle;*/ mWallReductionFactor[i_node] = 1.0; //sin(max_angle) + 0.1; // pow(sin(max_angle),6) * 10.0 /** 100.0*/ ; } std::cout << "max angle between normals found in the model = " << max_angle_overall << std::endl; // mr_matrix_container.WriteScalarToDatabase(YOUNG_MODULUS, mWallReductionFactor, mr_model_part.Nodes()); //slip condition // int slip_size = mSlipBoundaryList.size(); // #pragma omp parallel for firstprivate(slip_size) // for (int i_slip = 0; i_slip < slip_size; i_slip++) // { // unsigned int i_node = mSlipBoundaryList[i_slip]; // double h = mHavg[i_node]; // if(mY_wall < h) // mWallReductionFactor[i_node] = mY_wall/h; // } // int edge_size = medge_nodes.size(); #pragma omp parallel for firstprivate(edge_size) for (int i = 0; i < edge_size; i++) { int i_node = medge_nodes[i]; mWallReductionFactor[i_node] = medge_coefficient; //10.0; } // // //apply conditions on corners int corner_size = mcorner_nodes.size(); for (int i = 0; i < corner_size; i++) { int i_node = mcorner_nodes[i]; mWallReductionFactor[i_node] = mcorner_coefficient; //50.0; } } void ActivateClassicalWallResistance (double Ywall) { mWallLawIsActive = true; mY_wall = Ywall; for (unsigned int i = 0; i < mWallReductionFactor.size(); i++) mWallReductionFactor[i] = 1.0 ; } double ComputeVolumeVariation() { ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); double dt = CurrentProcessInfo[DELTA_TIME]; //slip condition int inout_size = mInOutBoundaryList.size(); double vol_var = 0.0; //#pragma omp parallel for firstprivate(slip_size) for (int i = 0; i < inout_size; i++) { unsigned int i_node = mInOutBoundaryList[i]; double dist = 
mdistances[i_node]; if (dist <= 0.0) { const array_1d<double, TDim>& U_i = mvel_n1[i_node]; const array_1d<double, TDim>& an_i = mInOutNormal[i_node]; double projection_length = 0.0; for (unsigned int comp = 0; comp < TDim; comp++) { projection_length += U_i[comp] * an_i[comp]; } vol_var += projection_length; } } return -vol_var * dt; } double ComputeWetVolume() { KRATOS_TRY mr_matrix_container.FillScalarFromDatabase (DISTANCE, mdistances, mr_model_part.Nodes() ); //slip condition double wet_volume = 0.0; //#pragma omp parallel for firstprivate(slip_size) for (int i = 0; i < static_cast<int> (mdistances.size() ); i++) { double dist = mdistances[i]; const double m = mr_matrix_container.GetLumpedMass() [i]; double porosity = mEps[i]; if (dist <= 0.0) { wet_volume += m/porosity; } } return wet_volume; KRATOS_CATCH (""); } double ComputeTotalVolume() { KRATOS_TRY mr_matrix_container.FillScalarFromDatabase (DISTANCE, mdistances, mr_model_part.Nodes() ); //slip condition double volume = 0.0; //#pragma omp parallel for firstprivate(slip_size) for (int i = 0; i < static_cast<int> (mdistances.size() ); i++) { const double m = mr_matrix_container.GetLumpedMass() [i]; double porosity = mEps[i]; volume += m/porosity; } return volume; KRATOS_CATCH (""); } void DiscreteVolumeCorrection (double expected_volume, double measured_volume) { double volume_error = expected_volume - measured_volume; if (measured_volume < expected_volume) { double layer_volume = 0.0; std::vector<unsigned int> first_outside; int n_nodes = mdistances.size(); // find list of the first nodes outside of the fluid and compute their volume for (int i_node = 0; i_node < n_nodes; i_node++) { double dist = mdistances[i_node]; if (dist > 0.0) //node is outside domain { for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++) { unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index]; if 
(mdistances[j_neighbour] <= 0.0) { const double nodal_mass = 1.0 / mr_matrix_container.GetInvertedMass() [i_node]; if (nodal_mass < volume_error - layer_volume) { first_outside.push_back (i_node); layer_volume += nodal_mass; break; } //const double m_inv = mr_matrix_container.GetInvertedMass()[i_node]; //layer_volume += 1.0/m_inv; } } } } // std::cout << ", layer_volume: " << layer_volume << std::endl; // if (measured_volume + layer_volume <= expected_volume) { // mark the nodes in the outside layer with a small negative distance for (unsigned int i=0; i<first_outside.size(); i++) { unsigned int i_node = first_outside[i]; mdistances[i_node] = -mHavg[i_node]; } } } mr_matrix_container.WriteScalarToDatabase (DISTANCE, mdistances, mr_model_part.Nodes() ); //if (measured_volume < expected_volume) // { // double layer_volume = 0.0; // std::vector<unsigned int> first_outside; // int n_nodes = mdistances.size(); // //find list of the first nodes outside of the fluid and compute their volume // for (int i_node = 0; i_node < n_nodes; i_node++) // { // double dist = mdistances[i_node]; // if (dist > 0.0) //node is outside domain // { // for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++) // { // unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; // if(mdistances[j_neighbour] <= 0.0) // { // first_outside.push_back(i_node); // const double m_inv = mr_matrix_container.GetInvertedMass()[i_node]; // layer_volume += 1.0/m_inv; // } // } // } // } // if (measured_volume + layer_volume <= expected_volume) // { // //mark the nodes in the outside layer with a small negative distance // for(unsigned int i=0; i<first_outside.size(); i++) // { // unsigned int i_node = first_outside[i]; // mdistances[i_node] = -mHavg[i_node]; // } // } // } // mr_matrix_container.WriteScalarToDatabase(DISTANCE, mdistances, mr_model_part.Nodes()); } void 
SetWallReductionCoefficients (double corner_coefficient, double edge_coefficient)
{
    //setters for the wall-law reduction factors used on corner and edge nodes
    mcorner_coefficient = corner_coefficient;
    medge_coefficient = edge_coefficient;
}

// Conserves mass by uniformly shifting the whole distance field: the shift is the
// average first-outside-layer height scaled by the (clamped) volume-error ratio.
// No correction is applied for errors below 10% of the layer volume.
void ContinuousVolumeCorrection (double expected_volume, double measured_volume)
{
    double volume_error = expected_volume - measured_volume;
    if (volume_error == 0.0)
        return ;
    if (measured_volume < expected_volume)
    {
        double layer_volume = 0.0;
        std::vector<unsigned int> first_outside;
        int n_nodes = mdistances.size();
        // find list of the first nodes outside of the fluid and compute their volume
        for (int i_node = 0; i_node < n_nodes; i_node++)
        {
            double dist = mdistances[i_node];
            bool is_bubble = true;
            bool is_first_outside = false;
            if (dist > 0.0) //node is outside domain
            {
                for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
                {
                    unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
                    if (mdistances[j_neighbour] <= 0.0)
                    {
                        is_first_outside = true;
                    }
                    else
                        is_bubble = false;
                }
            }
            //nodes entirely surrounded by fluid (bubbles) are excluded from the layer
            if (is_first_outside && !is_bubble)
            {
                const double nodal_mass = 1.0 / mr_matrix_container.GetInvertedMass() [i_node];
                first_outside.push_back (i_node);
                layer_volume += nodal_mass;
                // if(nodal_mass > volume_error - layer_volume)
                // {
                //     extra_volume += nodal_mass;
                // }
            }
        }
        // std::cout << ", layer_volume: " << layer_volume << std::endl;
        if (layer_volume == 0.00)
            return;
        double ratio = volume_error / layer_volume;
        if (ratio > 1.0)
            ratio = 1.0;
        // KRATOS_WATCH (ratio);
        if (ratio < 0.1) // NO correction for less than 10% error
            return;
        double average_layer_h = 0.0;
        for (unsigned int i=0; i<first_outside.size(); i++)
        {
            unsigned int i_node = first_outside[i];
            average_layer_h += mHavg[i_node];
        }
        average_layer_h /= static_cast<double> (first_outside.size() );
        for (int i_node = 0; i_node < n_nodes; i_node++)
            mdistances[i_node] -= average_layer_h* ratio;
        // if((ratio < 1.00))
        // {
        //     // mark the nodes in the outside layer with a small negative distance
        //     for(unsigned int i=0; i<first_outside.size(); i++)
        //     {
        //         unsigned int i_node = first_outside[i];
        //         mdistances[i_node] -= mHavg[i_node] * ratio;
        //     }
        // }
        // else
        // {
        //     // mark the nodes in the outside layer with a small negative distance
        //     for(unsigned int i=0; i<first_outside.size(); i++)
        //     {
        //         unsigned int i_node = first_outside[i];
        //         mdistances[i_node] = -mHavg[i_node];
        //     }
        // }
    }
    mr_matrix_container.WriteScalarToDatabase (DISTANCE, mdistances, mr_model_part.Nodes() );
    return;
}

// void FindBubbles()
// {
//     int n_nodes = mdistances.size();
//     ValuesVectorType last_air (n_nodes);
//     mr_matrix_container.SetToZero (last_air);
//     mr_matrix_container.FillScalarFromDatabase (LAST_AIR, last_air, mr_model_part.Nodes() );
//     const int max_bubble_nodes = 12;
//     const int min_bubble_nodes = 2;
//     #pragma omp parallel for
//     for ( int i_node = 0; i_node < static_cast<int>(mr_model_part.Nodes().size()); i_node++)
//         mis_visited[i_node] = 0;
//
//     // loop over the nodes to find a outside node.
//     for (int i_node = 0; i_node < n_nodes; i_node++)
//     {
//         double dist = mdistances[i_node];
//         if ( (mis_visited[i_node] == 0) && (dist > 0.0) ) // node is outside the domain and has not visited yet
//         {
//             std::vector<int> outside_nodes (n_nodes,0);
//             outside_nodes[0] = i_node;
//             mis_visited[i_node] = 1;
//             int n_outside = 1;
//             for (int i = 0 ; i < n_outside ; i++) // loop over founded outside nodes. NOTE: n_outside is increasing inside the loop
//             {
//                 int this_node = outside_nodes[i];
//                 // loop over neighbours of this node
//                 for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [this_node]; csr_index != mr_matrix_container.GetRowStartIndex() [this_node + 1]; csr_index++)
//                 {
//                     unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
//                     if ( (mis_visited[j_neighbour] == 0) && (mdistances[j_neighbour] >= 0.0) ) // the neighbour node is outside the fluid and not visited yet
//                     {
//                         outside_nodes[n_outside] = j_neighbour;
//                         n_outside++;
//                     }
//                     mis_visited[j_neighbour] = 1;
//                 }
//             }
//             //KRATOS_WATCH(i_node);
//             //KRATOS_WATCH(n_outside);
//             //KRATOS_WATCH(is_first_outside);
//             if ( (n_outside <= max_bubble_nodes) && (n_outside >= min_bubble_nodes) )
//             {
//                 //KRATOS_WATCH(i_node);
//                 //KRATOS_WATCH(n_outside);
//                 for (int i = 0 ; i < n_outside ; i++)
//                     last_air[outside_nodes[i]] = 1.00;
//             }
//         }
//     }
//     mr_matrix_container.WriteScalarToDatabase (LAST_AIR, last_air, mr_model_part.Nodes() );
// }
//
// void FindColdShots()
// {
//     int n_nodes = mdistances.size();
//     ValuesVectorType cold_shots(n_nodes);
//     // mr_matrix_container.SetToZero(cold_shots);
//     // mr_matrix_container.FillScalarFromDatabase(LAST_AIR, cold_shots, mr_model_part.Nodes());
//
//     std::vector<bool> is_first_outside(n_nodes, 0);
//
//     std::vector<unsigned int> first_outside;
//
//     // find list of the first nodes outside of the fluid
//     for (int i_node = 0; i_node < n_nodes; i_node++)
//     {
//         double dist = mdistances[i_node];
//         if (dist > 0.0) //node is outside domain
//         {
//             for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
//             {
//                 unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
//                 if(mdistances[j_neighbour] <= 0.0)
//                 {
//                     is_first_outside[i_node] = true;
//                     first_outside.push_back(i_node);
//                     break;
//                 }
//             }
//         }
//     }
// // // std::vector<bool> is_cold_shot(is_first_outside); // // // Now we check if all the neighbours of the first_outside nodes are first outside or inside and mark it as a possible cold shot // for(unsigned int i=0; i<first_outside.size(); i++) // { // unsigned int i_node = first_outside[i]; // for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++) // { // unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; // if(!is_first_outside[j_neighbour]) // { // is_cold_shot[i_node] = false; // break; // } // } // } // // // //Now we have the possible cold shots and is time to check the gradient of convection // for(unsigned int i=0; i<first_outside.size(); i++) // { // unsigned int i_node = first_outside[i]; // if(is_cold_shot[i_node]) // { // for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++) // { // unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; // if(mdistances[j_neighbour] <= 0.0) // { // // } // } // } // } // // // // // Adding the founded cold shots to the previous ones. 
// for(int i_node = 0; i_node < n_nodes; i_node++) // if(is_cold_shot[i_node]) // cold_shots[i_node]=1.00; // // mr_matrix_container.WriteScalarToDatabase(LAST_AIR, cold_shots, mr_model_part.Nodes()); // } void CalculatePorousResistanceLaw(unsigned int res_law) { //variables for node based data handling // ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes(); // mr_matrix_container.FillScalarFromDatabase(DIAMETER, mD, mr_model_part.Nodes()); // mr_matrix_container.FillScalarFromDatabase(POROSITY, mEps, mr_model_part.Nodes()); // mr_matrix_container.FillScalarFromDatabase(LIN_DARCY_COEF, mA, mr_model_part.Nodes()); // mr_matrix_container.FillScalarFromDatabase(NONLIN_DARCY_COEF, mB, mr_model_part.Nodes()); // const double nu_i = mViscosity; if (res_law == 1) { // KRATOS_WATCH("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Calculating Ergun Darcy coefficients ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~") /* if the chosen resistance law is ERGUN calculate Ergun A and B*/ for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) { const double eps = inode->FastGetSolutionStepValue (POROSITY); // KRATOS_WATCH("POROSITY ") // KRATOS_WATCH(eps) const double d = inode->FastGetSolutionStepValue (DIAMETER); // KRATOS_WATCH("DIAMETER ") // KRATOS_WATCH(d) // // KRATOS_WATCH("VISCOSITY ") // KRATOS_WATCH(mViscosity) double& a = inode-> FastGetSolutionStepValue (LIN_DARCY_COEF); double& b = inode-> FastGetSolutionStepValue (NONLIN_DARCY_COEF); if (eps < 1.0) { double k_inv = 150.0 * (1.0 - eps) * (1.0 - eps) / (eps * eps * eps * d * d); a = mViscosity * k_inv; b = (1.75 / eps) * sqrt (k_inv / (150.0 * eps) ); // KRATOS_WATCH("PERMEABILITY ") // KRATOS_WATCH(k_inv) // KRATOS_WATCH("LIN DARCY COEFFICIENT ") // KRATOS_WATCH(a) // KRATOS_WATCH("NONLIN DARCY COEFFICIENT ") // KRATOS_WATCH(b) } else { a = 0; b = 0; } } } else { /* whether it is a Custom Resistance law or NO resistance law is present ---> set to zero A and B for non 
porous nodes*/
        for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
                inode != mr_model_part.NodesEnd();
                inode++)
        {
            const double eps = inode->FastGetSolutionStepValue (POROSITY); /*reading from kratos database*/
            double& a = inode-> FastGetSolutionStepValue (LIN_DARCY_COEF); /*changing kratos database*/
            double& b = inode-> FastGetSolutionStepValue (NONLIN_DARCY_COEF); /*changing kratos database*/
            if (eps == 1.0)
            {
                a = 0;
                b = 0;
            }
        }
    }
    mr_matrix_container.FillScalarFromDatabase (LIN_DARCY_COEF, mA, mr_model_part.Nodes() ); /*filling edgebased database reading from kratos database*/
    mr_matrix_container.FillScalarFromDatabase (NONLIN_DARCY_COEF, mB, mr_model_part.Nodes() ); /*filling edgebased database reading from kratos database*/
}

private:
double mMolecularViscosity;
double mcorner_coefficient;
double medge_coefficient;
double mmax_dt;
MatrixContainer& mr_matrix_container;
ModelPart& mr_model_part;
int mnumsubsteps;
bool muse_mass_correction;
//parameters controlling the wall law
bool mWallLawIsActive;
double mY_wall;
//parameters for controlling the usage of the delta time in the stabilization
double mstabdt_pressure_factor;
double mstabdt_convection_factor;
double medge_detection_angle;
double mtau2_factor;
bool massume_constant_dp;
//nodal values
ValuesVectorType mViscosity;
//velocity vector U at time steps n and n+1
CalcVectorType mWork, mvel_n, mvel_n1, mx, macc;
//pressure vector p at time steps n and n+1
ValuesVectorType mPn, mPn1;
//coefficients
ValuesVectorType mdistances;
//minimum length of the edges surrounding edges surrounding each nodal point
ValuesVectorType mHmin;
ValuesVectorType mHavg;
CalcVectorType mEdgeDimensions;
//area normal
CalcVectorType mSlipNormal;
CalcVectorType mInOutNormal;
//projection terms
CalcVectorType mPi, mXi;
//flag for first time step
bool mFirstStep;
//flag to differentiate interior and boundary nodes
ValuesVectorType mNodalFlag;
ValuesVectorType mWallReductionFactor;
//lists of nodes with different types of boundary conditions
IndicesVectorType mSlipBoundaryList, mPressureOutletList, mFixedVelocities, mInOutBoundaryList,mDistanceBoundaryList;
ValuesVectorType mDistanceValuesList;
CalcVectorType mFixedVelocitiesValues;
// ValuesVectorType mPressureOutlet;
//intrinsic time step size
ValuesVectorType mTauPressure;
ValuesVectorType mTauConvection;
ValuesVectorType mTau2;
ValuesVectorType mdiv_error;
boost::numeric::ublas::vector<bool> mis_slip;
boost::numeric::ublas::vector<int> mis_visited;
//variables for resolving pressure equation
//laplacian matrix
TSystemMatrixType mL;
//constant variables
double mRho;
array_1d<double, TDim> mBodyForce;
//variables for convection
ValuesVectorType mphi_n;
ValuesVectorType mphi_n1;
CalcVectorType mPiConvection;
ValuesVectorType mBeta;
//variables for edge BCs
IndicesVectorType medge_nodes;
CalcVectorType medge_nodes_direction;
IndicesVectorType mcorner_nodes;
ValuesVectorType mEps;
ValuesVectorType mdiag_stiffness;
// ValuesVectorType mD;
ValuesVectorType mA;
ValuesVectorType mB;
double mdelta_t_avg;
double max_dt;
double mshock_coeff;

//***********************************************************
//functions to calculate area normals for boundary conditions
// 2D: normal of a line segment (rotated edge vector, z = 0), stored in the condition's NORMAL.
void CalculateNormal2D (ModelPart::ConditionsContainerType::iterator cond_it, array_1d<double, 3 > & area_normal)
{
    Geometry<Node < 3 > >& face_geometry = (cond_it)->GetGeometry();
    area_normal[0] = face_geometry[1].Y() - face_geometry[0].Y();
    area_normal[1] = - (face_geometry[1].X() - face_geometry[0].X() );
    area_normal[2] = 0.00;
    noalias ( (cond_it)->GetValue (NORMAL) ) = area_normal;
}

// 3D: area normal of a triangular face via -0.5 * (v1 x v2), stored in the condition's NORMAL.
void CalculateNormal3D (ModelPart::ConditionsContainerType::iterator cond_it, array_1d<double, 3 > & area_normal, array_1d<double, 3 > & v1, array_1d<double, 3 > & v2)
{
    Geometry<Node < 3 > >& face_geometry = (cond_it)->GetGeometry();
    v1[0] = face_geometry[1].X() - face_geometry[0].X();
    v1[1] = face_geometry[1].Y() - face_geometry[0].Y();
    v1[2] = face_geometry[1].Z() -
face_geometry[0].Z(); v2[0] = face_geometry[2].X() - face_geometry[0].X(); v2[1] = face_geometry[2].Y() - face_geometry[0].Y(); v2[2] = face_geometry[2].Z() - face_geometry[0].Z(); MathUtils<double>::CrossProduct (area_normal, v1, v2); area_normal *= -0.5; noalias ( (cond_it)->GetValue (NORMAL) ) = area_normal; } //********************************************************* //function to calculate minimum length of surrounding edges void CalculateEdgeLengths (ModelPart::NodesContainerType& rNodes) { KRATOS_TRY //get number of nodes unsigned int n_nodes = rNodes.size(); //reserve memory for storage of nodal coordinates std::vector< array_1d<double, TDim > > position; position.resize (n_nodes); //get position of all nodes for (typename ModelPart::NodesContainerType::iterator node_it = rNodes.begin(); node_it != rNodes.end(); node_it++) { //get the global index of the node unsigned int i_node = static_cast<unsigned int> (node_it->FastGetSolutionStepValue (AUX_INDEX) ); //save its coordinates locally noalias (position[i_node]) = node_it->Coordinates(); //initialize minimum edge length with relatively big values // mHmin[i_node] = 1e10; } ValuesVectorType& aaa = mr_matrix_container.GetHmin(); for (unsigned int i_node = 0; i_node < n_nodes; i_node++) { mHmin[i_node] = aaa[i_node]; if (aaa[i_node] == 0.0) KRATOS_THROW_ERROR (std::logic_error,"found a 0 hmin on node",i_node); } //take unstructured meshes into account if (TDim == 2) { for (unsigned int i_node = 0; i_node < n_nodes; i_node++) { double& h_i = mHavg[i_node]; double& m_i = mr_matrix_container.GetLumpedMass() [i_node]; // double& rho_i = mRho[i_node]; h_i = sqrt (2.0 * m_i); } } else if (TDim == 3) { for (unsigned int i_node = 0; i_node < n_nodes; i_node++) { double& h_i = mHavg[i_node]; double& m_i = mr_matrix_container.GetLumpedMass() [i_node]; // double& rho_i = mRho[i_node]; h_i = pow (6.0 * m_i, 1.0 / 3.0); } } //compute edge coordinates for (unsigned int i_node = 0; i_node < n_nodes; i_node++) { 
array_1d<double, TDim > & pos_i = position[i_node]; for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++) { unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index]; array_1d<double, TDim > & pos_j = position[j_neighbour]; array_1d<double, TDim>& l_k = mEdgeDimensions[csr_index]; for (unsigned int comp = 0; comp < TDim; comp++) l_k[comp] = pos_i[comp] - pos_j[comp]; } } KRATOS_CATCH ("") } //********************************************************************* //function to calculate right-hand side of fractional momentum equation void CalculateRHS_convection ( const ValuesVectorType& mphi, const CalcVectorType& convective_velocity, ValuesVectorType& rhs, ValuesVectorType& active_nodes ) { KRATOS_TRY int n_nodes = mphi.size(); // //calculating the convective projection //#pragma omp parallel for // for (int i_node = 0; i_node < n_nodes; i_node++) // { // // double& pi_i = mPiConvection[i_node]; // const double& phi_i = mphi[i_node]; // // //set to zero the projection // pi_i = 0; // if (active_nodes[i_node] != 0.0) // { // // const array_1d<double, TDim>& a_i = convective_velocity[i_node]; // // //loop to all the edges surrounding node I // for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++) // { // unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; // // if (active_nodes[j_neighbour] != 0.0) // { // const array_1d<double, TDim>& a_j = convective_velocity[j_neighbour]; // const double& phi_j = mphi[j_neighbour]; // // CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index]; // // edge_ij.Add_ConvectiveContribution(pi_i, a_i, phi_i, a_j, phi_j); // } // } // // //apply inverted mass matrix // const double m_inv = mr_matrix_container.GetInvertedMass()[i_node]; // pi_i *= m_inv; // } // // KRATOS_WATCH(pi_i); 
//     // num = fabs(num);
    //     // if(num > norm_vI*0.0001)
    //     //     mBeta[i_node] = 1.0 - num/denom;
    //     // else
    //     //     mBeta[i_node] = 1.0;
    //
    // }
    //perform MPI synchronization
    //calculating the RHS
    double stab_low;
    double stab_high;
    array_1d<double, TDim> a_i;
    array_1d<double, TDim> a_j;
    #pragma omp parallel for private(stab_low,stab_high,a_i,a_j)
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        double& rhs_i = rhs[i_node];
        const double& h_i = mHavg[i_node];
        const double& phi_i = mphi[i_node];
        //effective convection velocity: divided by the porosity
        noalias (a_i) = convective_velocity[i_node];
        a_i /= mEps[i_node];
        const array_1d<double, TDim>& proj_i = mPiConvection[i_node];
        // const double& pi_i = mPiConvection[i_node];
        double pi_i = proj_i[0] * a_i[0];
        for (unsigned int l_comp = 1; l_comp < TDim; l_comp++)
            pi_i += proj_i[l_comp] * a_i[l_comp];
        // double beta = mBeta[i_node];
        rhs_i = 0.0;
        if (active_nodes[i_node] != 0.0)
        {
            const double& beta = mBeta[i_node];
            double norm_a = a_i[0] * a_i[0];
            for (unsigned int l_comp = 1; l_comp < TDim; l_comp++)
                norm_a += a_i[l_comp] * a_i[l_comp];
            norm_a = sqrt (norm_a);
            //loop to all the edges surrounding node I
            for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
            {
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
                if (active_nodes[j_neighbour] != 0.0)
                {
                    //double& rhs_j = rhs[j_neighbour];
                    const double& phi_j = mphi[j_neighbour];
                    noalias (a_j) = convective_velocity[j_neighbour];
                    a_j /= mEps[j_neighbour];
                    // const double& pi_j = mPiConvection[j_neighbour];
                    const array_1d<double, TDim>& proj_j = mPiConvection[j_neighbour];
                    //NOTE(review): pi_j is projected on a_i (not a_j) -- presumably intentional; confirm
                    double pi_j = proj_j[0] * a_i[0];
                    for (unsigned int l_comp = 1; l_comp < TDim; l_comp++)
                        pi_j += proj_j[l_comp] * a_i[l_comp];
                    CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];
                    //convection operator
                    edge_ij.Sub_ConvectiveContribution (rhs_i, a_i, phi_i, a_j, phi_j); //this works
                    // edge_ij.Sub_D_v(rhs_i, a_i*phi_i, a_i*phi_j);
                    //calculate stabilization part
                    edge_ij.CalculateConvectionStabilization_LOW (stab_low, a_i, phi_i, a_j, phi_j);
                    double edge_tau = mTauConvection[i_node];
                    edge_ij.CalculateConvectionStabilization_HIGH (stab_high, a_i, pi_i, a_j, pi_j);
                    edge_ij.Sub_StabContribution (rhs_i, edge_tau, 1.0, stab_low, stab_high);
                    //shock capturing: crosswind part of the laplacian diffusion only
                    double coeff = 0.5 * mshock_coeff; //=0.7*0.5;
                    double laplacian_ij = 0.0;
                    edge_ij.CalculateScalarLaplacian (laplacian_ij);
                    double capturing = laplacian_ij * (phi_j - phi_i);
                    // rhs_i-= coeff*capturing*beta*norm_a*h_i;
                    double aaa = 0.0;
                    for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
                        for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
                            aaa += a_i[k_comp] * a_i[m_comp] * edge_ij.LaplacianIJ (k_comp, m_comp);
                    if (norm_a > 1e-10)
                    {
                        aaa /= (norm_a * norm_a);
                        double capturing2 = aaa * (phi_j - phi_i);
                        if (fabs (capturing) > fabs (capturing2) )
                            rhs_i -= coeff * (capturing - capturing2) * beta * norm_a * h_i;
                    }
                }
            }
        }
        // KRATOS_WATCH(rhs_i);
    }
    //in/out boundary contributions (disabled):
    // int inout_size = mInOutBoundaryList.size();
    // //#pragma omp parallel for firstprivate(slip_size)
    // for (int i = 0; i < inout_size; i++)
    // {
    //     unsigned int i_node = mInOutBoundaryList[i];
    //     double dist = mdistances[i_node];
    //     if (dist <= 0.0)
    //     {
    //         const array_1d<double, TDim>& U_i = mvel_n1[i_node];
    //         const array_1d<double, TDim>& an_i = mInOutNormal[i_node];
    //         double projection_length = 0.0;
    //         double Ain = 0.0;
    //         for (unsigned int comp = 0; comp < TDim; comp++)
    //         {
    //             projection_length += U_i[comp] * an_i[comp];
    //             Ain += an_i[comp]*an_i[comp];
    //         }
    //
    //         double& rhs_i = rhs[i_node];
    //
    //         rhs_i += projection_length * mphi[i_node];
    //     }
    // }
    // int inout_size = mInOutBoundaryList.size();
    // double vol_var = 0.0;
    // //#pragma omp parallel for firstprivate(slip_size)
    // for (int i = 0; i < inout_size; i++)
    // {
    //     unsigned int i_node = mInOutBoundaryList[i];
    //     double dist = mdistances[i_node];
    //     // if (dist <= 0.0)
    //     // {
    //     const array_1d<double, TDim>& U_i = mvel_n1[i_node];
    //     const array_1d<double, TDim>& an_i = mInOutNormal[i_node];
    //     double A = norm_2(an_i);
    //
    //     double projection_length = 0.0;
    //     for (unsigned int comp = 0; comp < TDim; comp++)
    //     {
    //         projection_length += U_i[comp] * an_i[comp];
    //     }
    //
    //     double& rhs_i = rhs[i_node];
    //
    //     // if(projection_length > 0) //outlet
    //     //     rhs_i += A;
    //     // else
    //     rhs_i -= A;
    //
    //     // }
    // }
    KRATOS_CATCH ("")
}

//**************************************
// Detects "corner" edges between two neighbouring slip faces: when the angle between
// the two face normals exceeds the threshold, both end nodes of the shared edge get
// their edge counter incremented and the edge direction accumulated in cornern_list.
void CornerDectectionHelper (Geometry< Node < 3 > >& face_geometry,
                             const array_1d<double, 3 > & face_normal,
                             const double An,
                             const GlobalPointersVector<Condition>& neighb,
                             const unsigned int i1,
                             const unsigned int i2,
                             const unsigned int neighb_index,
                             std::vector<unsigned int>& edge_nodes,
                             CalcVectorType& cornern_list
                            )
{
    //NOTE(review): 3.1 is a coarse approximation of pi -- the threshold is slightly below 45 deg; confirm intent
    double acceptable_angle = 45.0 / 180.0 * 3.1; //angles of less than 45 deg will be accepted
    double acceptable_cos = cos (acceptable_angle);
    if (face_geometry[i1].Id() < face_geometry[i2].Id() ) //we do this to add the face ones
    {
        const array_1d<double, 3 > & neighb_normal = neighb[neighb_index].GetValue (NORMAL);
        double neighb_An = norm_2 (neighb_normal);
        double cos_normal = 1.0 / (An * neighb_An) * inner_prod (face_normal, neighb_normal);
        //if the angle is too big between the two normals then the edge in the middle is a corner
        if (cos_normal < acceptable_cos)
        {
            array_1d<double, 3 > edge = face_geometry[i2].Coordinates() - face_geometry[i1].Coordinates();
            double temp = norm_2 (edge);
            edge /= temp;
            int index1 = face_geometry[i1].FastGetSolutionStepValue (AUX_INDEX);
            int index2 = face_geometry[i2].FastGetSolutionStepValue (AUX_INDEX);
            edge_nodes[index1] += 1;
            edge_nodes[index2] += 1;
            // double sign1 = inner_prod (cornern_list[index1], edge);
            double sign1 = 0.0;
            for(unsigned int i = 0 ; i < edge.size() ; i++)
            {sign1 += cornern_list[index1][i]*edge[i];}
            //accumulate the edge direction with a consistent sign
            if (sign1 >= 0)
            {
                for(unsigned int i = 0 ; i < edge.size() ; i++)
                    cornern_list[index1][i]
-= edge[i]; } double sign2 = inner_prod(cornern_list[index2], edge); if (sign2 >= 0) { for(unsigned int i = 0 ; i < edge.size() ; i++) cornern_list[index2][i] += edge[i]; } else { for(unsigned int i = 0 ; i < edge.size() ; i++) cornern_list[index2][i] -= edge[i]; } } } } //function to calculate the area normals void DetectEdges3D (ModelPart::ConditionsContainerType& rConditions) { KRATOS_TRY //calculate area normals face-by-face array_1d<double, 3 > area_normal; //(re)initialize normals unsigned int n_nodes = mNodalFlag.size(); std::vector<unsigned int> temp_edge_nodes (n_nodes); CalcVectorType temp_cornern_list (n_nodes); for (unsigned int i_node = 0; i_node < n_nodes; i_node++) { temp_edge_nodes[i_node] = 0.0; noalias (temp_cornern_list[i_node]) = ZeroVector (TDim); } //loop over all faces // const double node_factor = 1.0 / TDim; for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++) { //get geometry data of the face Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry(); //reference for area normal of the face const array_1d<double, 3 > & face_normal = cond_it->GetValue (NORMAL); double An = norm_2 (face_normal); unsigned int current_id = cond_it->Id(); //slip condition if (cond_it->GetValue (IS_STRUCTURE) == 1.0) //this is a slip face --> now look for its neighbours { const GlobalPointersVector<Condition>& neighb = cond_it->GetValue (NEIGHBOUR_CONDITIONS); //check for neighbour zero if (neighb[0].Id() != current_id) //check if the neighbour exists CornerDectectionHelper (face_geometry, face_normal, An, neighb, 1, 2, 0, temp_edge_nodes, temp_cornern_list); //check for neighbour one if (neighb[1].Id() != current_id) //check if the neighbour exists CornerDectectionHelper (face_geometry, face_normal, An, neighb, 2, 0, 1, temp_edge_nodes, temp_cornern_list); //check for neighbour two if (neighb[2].Id() != current_id) //check if the neighbour exists CornerDectectionHelper (face_geometry, 
face_normal, An, neighb, 0, 1, 2, temp_edge_nodes, temp_cornern_list);
        }
    }
    // ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
    // mr_matrix_container.WriteVectorToDatabase(ACCELERATION, temp_cornern_list, rNodes);
    //fill the list of edge_nodes
    std::vector<unsigned int> tempmedge_nodes;
    std::vector< array_1d<double,TDim> > tempmedge_nodes_direction;
    std::vector<unsigned int> tempmcorner_nodes;
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        if (temp_edge_nodes[i_node] == 2) //node is a edge_node
        {
            tempmedge_nodes.push_back (i_node);
            array_1d<double, TDim>& node_edge = temp_cornern_list[i_node];
            node_edge /= norm_2 (node_edge);
            tempmedge_nodes_direction.push_back (node_edge);
        }
        else if (temp_edge_nodes[i_node] > 2)
            tempmcorner_nodes.push_back (i_node);
    }
    //copy the temporaries into the member containers
    medge_nodes.resize (tempmedge_nodes.size(),false);
    medge_nodes_direction.resize (tempmedge_nodes_direction.size(),false);
    mcorner_nodes.resize (tempmcorner_nodes.size(),false);
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int> (tempmedge_nodes.size() ); i++)
    {
        medge_nodes[i] = tempmedge_nodes[i];
        medge_nodes_direction[i] = tempmedge_nodes_direction[i];
    }
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int> (tempmcorner_nodes.size() ); i++)
    {
        mcorner_nodes[i] = tempmcorner_nodes[i];
    }
    for (unsigned int i = 0; i < mcorner_nodes.size(); i++)
    {
        KRATOS_WATCH (mcorner_nodes[i]);
    }
    KRATOS_CATCH ("")
}

// double ComputePorosityCoefficient(const double& viscosity, const double& vel_norm, const double& eps, const double& d)
// {
//     // const double d = 0.01; //to be changed
//     double linear;
//     double non_linear;
//     if (eps < 1.0)
//     {
//         double k_inv = 150.0 * (1.0 - eps)*(1.0 - eps) / (eps * eps * eps * d * d);
//         linear = eps * viscosity * k_inv;
//         non_linear = (1.75 * vel_norm) * sqrt(k_inv / (150.0 * eps));
//         // double linear = viscosity * k_inv;
//         // double non_linear = (1.75 * vel_norm / eps) * sqrt(k_inv / (150.0 * eps));
//     }
//     else
//     {
//         linear = 0.0;
//         non_linear = 0.0;
//     }
//     return linear + non_linear;
// }

// Darcy drag coefficient from the precomputed linear (a) and non-linear (b) coefficients.
double ComputePorosityCoefficient (const double& vel_norm, const double& eps, const double& a, const double& b)
{
    double linear;
    double non_linear;
    linear = eps * a;
    non_linear = eps * b * vel_norm;
    return linear + non_linear;
}

// One Jacobi-like Laplacian smoothing pass of to_be_smoothed on interior (distance <= 0)
// nodes; aux is used as scratch and the result is copied back into to_be_smoothed.
void LaplacianSmooth (ValuesVectorType& to_be_smoothed, ValuesVectorType& aux)
{
    ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
    int n_nodes = rNodes.size();
    #pragma omp parallel for
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        double dist = mdistances[i_node];
        double correction = 0.0;
        const double& origin_i = to_be_smoothed[i_node];
        if (dist <= 0.0) //node is inside domain ---- if outside do nothing
        {
            for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
            {
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
                const double& origin_j = to_be_smoothed[j_neighbour];
                CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];
                double l_ikjk;
                edge_ij.CalculateScalarLaplacian (l_ikjk);
                correction += l_ikjk * (origin_j - origin_i);
            }
        }
        aux[i_node] = origin_i - correction;
    }
    #pragma omp parallel for
    for (int i_node = 0; i_node < n_nodes; i_node++)
        to_be_smoothed[i_node] = aux[i_node];
}

// Fills diag_stiffness with the wall-law drag on slip-boundary fluid nodes
// (linear-in-velocity resistance using the molecular viscosity and mY_wall).
void ComputeWallResistance (
    const CalcVectorType& vel,
    ValuesVectorType& diag_stiffness
    // CalcVectorType& rhs
)
{
    //parameters:
    // double k = 0.41;
    // double B = 5.1;
    // double density = mRho;
    // double toll = 1e-6;
    double ym = mY_wall; //0.0825877; //0.0093823
    // double y_plus_incercept = 10.9931899;
    // unsigned int itmax = 100;
    if (mViscosity[0] == 0)
        KRATOS_THROW_ERROR (std::logic_error, "it is not possible to use the wall law with 0 viscosity", "");
    /* //slip condition
    int slip_size = mSlipBoundaryList.size();
    #pragma omp parallel for firstprivate(slip_size,B,toll,ym,y_plus_incercept,itmax)
    for (int i_slip = 0; i_slip < slip_size; i_slip++)
    {
        unsigned int
i_node = mSlipBoundaryList[i_slip]; double dist = mdistances[i_node]; if (dist <= 0.0) { double nu = mViscosity[i_node]; //array_1d<double, TDim>& rhs_i = rhs[i_node]; const array_1d<double, TDim>& U_i = vel[i_node]; const array_1d<double, TDim>& an_i = mSlipNormal[i_node]; //compute the modulus of the velocity double mod_vel = 0.0; double area = 0.0; for (unsigned int comp = 0; comp < TDim; comp++) { mod_vel += U_i[comp] * U_i[comp]; area += an_i[comp] * an_i[comp]; } mod_vel = sqrt(mod_vel); area = sqrt(area); //now compute the skin friction double mod_uthaw = sqrt(mod_vel * nu / ym); double y_plus = ym * mod_uthaw / nu; if (y_plus > y_plus_incercept) { //begin cicle to calculate the real u_thaw's module: unsigned int it = 0; double dx = 1e10; // KRATOS_WATCH(fabs(dx)); while ( (fabs(dx) > toll * mod_uthaw) && (it < itmax) ) { double a = 1.0 / k; double temp = a * log(ym * mod_uthaw / nu) + B; double y = mod_uthaw * (temp) - mod_vel; double y1 = temp + a; dx = y / y1; mod_uthaw -= dx; it = it + 1; } if (it == itmax) std::cout << "attention max number of iterations exceeded in wall law computation" << std::endl; } double tau = mod_uthaw * mod_uthaw ; tau *= mWallReductionFactor[i_node]; if (mod_vel > 1e-9) diag_stiffness[i_node] = tau * area / mod_vel;*/ /* int slip_size = mSlipBoundaryList.size(); #pragma omp parallel for firstprivate(slip_size,B,toll,ym,y_plus_incercept,itmax) for (int i_slip = 0; i_slip < slip_size; i_slip++) { unsigned int i_node = mSlipBoundaryList[i_slip]; double dist = mdistances[i_node]; if (dist <= 0.0) { double nu = mViscosity[i_node]; //array_1d<double, TDim>& rhs_i = rhs[i_node]; const array_1d<double, TDim>& U_i = vel[i_node]; const array_1d<double, TDim>& an_i = mSlipNormal[i_node]; //compute the modulus of the velocity double mod_vel = 0.0; double area = 0.0; for (unsigned int comp = 0; comp < TDim; comp++) { mod_vel += U_i[comp] * U_i[comp]; area += an_i[comp] * an_i[comp]; } mod_vel = sqrt (mod_vel); area = sqrt (area); 
diag_stiffness[i_node] = area * mod_vel /pow(1.0/k*log(100) + B,2) * mWallReductionFactor[ i_node ]; } else diag_stiffness[i_node] = 0.0; }*/ //slip condition int slip_size = mSlipBoundaryList.size(); #pragma omp parallel for firstprivate(slip_size,ym) for (int i_slip = 0; i_slip < slip_size; i_slip++) { unsigned int i_node = mSlipBoundaryList[i_slip]; double dist = mdistances[i_node]; if (dist <= 0.0) { double nu = mMolecularViscosity; //mViscosity[i_node]; //array_1d<double, TDim>& rhs_i = rhs[i_node]; const array_1d<double, TDim>& U_i = vel[i_node]; const array_1d<double, TDim>& an_i = mSlipNormal[i_node]; //compute the modulus of the velocity double mod_vel = 0.0; double area = 0.0; for (unsigned int comp = 0; comp < TDim; comp++) { mod_vel += U_i[comp] * U_i[comp]; area += an_i[comp] * an_i[comp]; } mod_vel = sqrt (mod_vel); area = sqrt (area); //the 0.1 is such that the dissipation is as for the linear case for a velocity of 10m/s diag_stiffness[i_node] = area * nu * mod_vel/ (ym ) * mWallReductionFactor[ i_node ] ; } else { diag_stiffness[i_node] = 0.0 ; } } // //apply higher resistance normally to the edges // int edge_size = medge_nodes_direction.size(); // #pragma omp parallel for firstprivate(edge_size) // for (int i = 0; i < edge_size; i++) // { // int i_node = medge_nodes[i]; // double dist = mdistances[i_node]; // // if(dist <= 0.0) // { // double nu = mViscosity[i_node]; // const array_1d<double, TDim>& an_i = mSlipNormal[i_node]; // // //compute the modulus of the velocity // double area = 0.0; // for (unsigned int comp = 0; comp < TDim; comp++) // { // area += an_i[comp] * an_i[comp]; // } // area = sqrt (area); // // diag_stiffness[i_node] += area * nu / (ym ) ; // // } // } // // int corner_size = mcorner_nodes.size(); // for (int i = 0; i < corner_size; i++) // { // int i_node = mcorner_nodes[i]; // double nu = mViscosity[i_node]; // mWallReductionFactor[i_node] = mcorner_coefficient; //50.0; // const double m = 
mr_matrix_container.GetLumpedMass()[i_node]; // diag_stiffness[i_node] += 100.0*m * nu / (ym ) ; // } } void ApplySmagorinsky3D (double MolecularViscosity, double Cs) { KRATOS_TRY ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes(); //calculating the RHS array_1d<double, TDim> grad_vx; array_1d<double, TDim> grad_vy; array_1d<double, TDim> grad_vz; int n_nodes = rNodes.size(); mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, rNodes); array_1d<double, TDim> stab_high; #pragma omp parallel for private(grad_vx,grad_vy,grad_vz) for (int i_node = 0; i_node < n_nodes; i_node++) { //set to zero the gradients for (unsigned int comp = 0; comp < TDim; comp++) { grad_vx[comp] = 0.0 ; grad_vy[comp] = 0.0 ; grad_vz[comp] = 0.0 ; } //compute node by node the gradients const array_1d<double, TDim>& U_i = mvel_n1[i_node]; const double h = mHmin[i_node]; const double m_inv = mr_matrix_container.GetInvertedMass() [i_node]; for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++) { unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index]; const array_1d<double, TDim>& U_j = mvel_n1[j_neighbour]; CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index]; edge_ij.Add_grad_p (grad_vx, U_i[0], U_j[0]); edge_ij.Add_grad_p (grad_vy, U_i[1], U_j[1]); edge_ij.Add_grad_p (grad_vz, U_i[2], U_j[2]); } //finalize computation of the gradients //set to zero the gradients for (unsigned int comp = 0; comp < TDim; comp++) { grad_vx[comp] *= m_inv ; grad_vy[comp] *= m_inv ; grad_vz[comp] *= m_inv ; } //symmetrize and multiply by 2 grad_vx[0] *= 2.0; grad_vy[1] *= 2.0; if(TDim > 2) grad_vz[2] *= 2.0; grad_vx[1] += grad_vy[0]; if(TDim > 2) grad_vx[2] += grad_vz[0]; if(TDim > 2) grad_vy[2] += grad_vz[1]; grad_vy[0] += grad_vx[1]; grad_vz[0] += grad_vx[2]; grad_vz[1] += grad_vy[2]; //compute smagorinsky term double aux = 0.0; for (unsigned int comp = 0; 
comp < TDim; comp++) { aux += grad_vx[comp] * grad_vx[comp] ; aux += grad_vy[comp] * grad_vy[comp] ; aux += grad_vz[comp] * grad_vz[comp] ; } aux *= 0.5; if (aux < 0.0 ) aux=0.0; double turbulent_viscosity = Cs*h*h*sqrt (aux) /**MolecularViscosity*/; // KRATOS_WATCH(aux); // KRATOS_WATCH(turbulent_viscosity); mViscosity[i_node] = turbulent_viscosity + MolecularViscosity; } mr_matrix_container.WriteScalarToDatabase (VISCOSITY, mViscosity, rNodes); KRATOS_CATCH (""); } void Add_Effective_Inverse_Multiply ( CalcVectorType& destination, const CalcVectorType& origin1, const double value, const ValuesVectorType& mass, const ValuesVectorType& diag_stiffness, const CalcVectorType& origin ) { KRATOS_TRY int loop_size = destination.size(); #pragma omp parallel for for (int i_node = 0; i_node < loop_size; i_node++) { array_1d<double, TDim>& dest = destination[i_node]; const double m = mass[i_node]; const double d = diag_stiffness[i_node]; const array_1d<double, TDim>& origin_vec1 = origin1[i_node]; const array_1d<double, TDim>& origin_value = origin[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) dest[comp] = value / (m + value*d) * ( m/value * origin_vec1[comp] + origin_value[comp] ); } KRATOS_CATCH ("") } void ComputeConvectiveProjection ( CalcVectorType& mPiConvection, const ValuesVectorType& mphi_n1, const ValuesVectorType& mEps, const CalcVectorType& mvel_n1 ) { int n_nodes = mPiConvection.size(); //calculating the convective projection array_1d<double, TDim> a_i; array_1d<double, TDim> a_j; #pragma omp parallel for private(a_i,a_j) for (int i_node = 0; i_node < n_nodes; i_node++) { array_1d<double, TDim>& pi_i = mPiConvection[i_node]; // setting to zero the projection for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) pi_i[l_comp] = 0.0; /* if (active_nodes[i_node] != 0.0) {*/ const double& phi_i = mphi_n1[i_node]; noalias (a_i) = mvel_n1[i_node]; a_i /= mEps[i_node]; // loop to all the edges surrounding node I for (unsigned int csr_index = 
mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++) { unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index]; noalias (a_j) = mvel_n1[j_neighbour]; a_j /= mEps[j_neighbour]; const double& phi_j = mphi_n1[j_neighbour]; CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index]; edge_ij.Add_grad_p (pi_i, phi_i, phi_j); // if(i_node == 3255) // { // KRATOS_WATCH(j_neighbour) // KRATOS_WATCH(pi_i) // KRATOS_WATCH(mEps[i_node]) // KRATOS_WATCH(mEps[j_neighbour]) // KRATOS_WATCH(phi_i) // KRATOS_WATCH(phi_j) // KRATOS_WATCH(a_i) // KRATOS_WATCH(a_j) // KRATOS_WATCH(mr_matrix_container.GetInvertedMass()[i_node]) // KRATOS_WATCH(edge_ij.Ni_DNj) // // } } // apply inverted mass matrix const double m_inv = mr_matrix_container.GetInvertedMass() [i_node]; for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) pi_i[l_comp] *= m_inv; // std::cout << i_node << " " << pi_i << " " << mvel_n1[i_node] << " " << phi_i <<std::endl; // for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) // if(std::isnan(pi_i[l_comp])) // KRATOS_WATCH(m_inv); // } } } void ComputeLimitor ( CalcVectorType& mPiConvection, const ValuesVectorType& mphi_n1, ValuesVectorType& mBeta, const CalcVectorType& mvel_n1, const CalcVectorType& mEdgeDimensions ) { int n_nodes = mPiConvection.size(); #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; i_node++) { const array_1d<double, TDim>& pi_i = mPiConvection[i_node]; const double& p_i = mphi_n1[i_node]; double& beta_i = mBeta[i_node]; beta_i = 0.0; double n = 0.0; for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++) { unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index]; const double& p_j = mphi_n1[j_neighbour]; const array_1d<double, TDim>& l_k = mEdgeDimensions[csr_index]; const array_1d<double, TDim>& pi_j = 
mPiConvection[j_neighbour]; // double proj = 0.0; // for (unsigned int comp = 0; comp < TDim; comp++) // proj += 0.5*l_k[comp]*(pi_i[comp]+pi_j[comp]); // double beta = fabs((p_i - p_j - proj)/(fabs(p_i-p_j)+fabs(proj)+1e-4)); double proj = 0.0; for (unsigned int comp = 0; comp < TDim; comp++) proj += 0.5 * l_k[comp]* (pi_i[comp] + pi_j[comp]); // proj += dir[comp]*pi_i[comp]; double numerator = fabs (fabs (p_j - p_i) - fabs (proj) ); double denom = fabs (fabs (p_j - p_i) + 1e-6); beta_i += numerator / denom; n += 1.0; } beta_i /= n; if (beta_i > 1.0) beta_i = 1.0; } } }; } //namespace Kratos #undef SYMM_PRESS #endif //KRATOS_EDGEBASED_LEVELSET_SUBSTEP_FLUID_SOLVER_H_INCLUDED defined
iter_helper.h
#pragma once

#include "util/graph/graph.h"
#include "util/timer.h"
#include "util/log/log.h"
#include "util/search/search_util.h"
#include "util/stat.h"

#include "pkt_support_update_utils.h"
#include "parallel_all_edge_cnc.h"
#include "iter_stat_helper.h"

#define V_BUFF_SIZE (4096)
#define LEVEL_SKIP_SIZE (16)
#define MAX_LEVEL (20000)

extern size_t tc_cnt;

// Shared state and operations for iterative truss-decomposition peeling:
// owns the level queues, bucket structures, and the scratch memory used to
// shrink/compact the CSR representation as edges are removed.
class IterHelper {
public:
    size_t num_edges_;
//private:
    vector <eid_t> histogram_;
    int omp_num_threads_;
    graph_t *g;
    eid_t *compact_num_edges_;
    vid_t *compact_adj_;
    eid_t *compact_eid_;
    // num_edges_ is changed during the shrinking.
    int n_;
public:
    BoolArray <word_type> processed_;
    // Origin Edge Offset.
    eid_t *edge_off_org_;
    eid_t *level_start_pos_;
    eid_t *edge_offsets_level_;
    int level_size_;

    // Bucket Related.
    BoolArray <word_type> bucket_removed_indicator_;
    int bucket_level_end_ = 0;
    bool *in_bucket_window_;
    eid_t *bucket_buf_;
    size_t window_bucket_buf_size_ = 0;
    size_t total_size_ = 0;

    // Queue Related. (curr_/next_).
    long curr_tail_;
    long next_tail_;
    eid_t *curr_;
    BoolArray <word_type> in_curr_;
    eid_t *next_;
    BoolArray <word_type> in_next_;

    // For Graph Shrink.
    bool *is_vertex_updated_;
    eid_t *off_end_;
    vid_t *global_v_buffer_;
public:
    // View (edge list and edge support)
    int **edge_sup_ptr_;
    Edge **edge_lst_ptr_;

    // Shrink Related Extra Memory.
    eid_t *edge_off_org_shrink_;
    int *edge_support_shrink_;
    Edge *edge_lst_shrink_;
    eid_t *bucket_buf_shrink_;
    eid_t *edge_lst_relative_off_;   // // prefix-sum inclusive
    eid_t *bucket_relative_off_;     // prefix-sum inclusive
public:
    // BSR.
    vector <vector<int>> partition_id_lst;
    vector <vector<bmp_word_type>> bitmap_in_partition_lst;

    void FreeBSR();

public:
    IterHelper(graph_t *g, int **edge_sup_ptr, Edge **edge_lst_ptr);

    void MemSetIterVariables(int max_omp_threads);

    void ComputeTriSupport(IterStatTLS &iter_stat_tls);

    void SCANGraph(int level);

    void ShrinkCSREID(volatile eid_t *global_buffer_size, vid_t *local_buffer);

    void CompactCSREID();

    void ShrinkEdgeList();

    void MarkProcessed();

    void SwapCurNextQueue();

    void ProcessSupportZeros();

    int TrussDecompositionMergeBased();

    void TransferResult(eid_t *&level_start_pos, eid_t *&edge_offsets_level, eid_t *&edge_off_org,
                        int *&edge_sup, Edge *&edge_lst);

    ~IterHelper();
};

void TriCntDetailSubLevel(graph_t *g, eid_t *curr, BoolArray <word_type> &InCurr, long currTail,
                          int *EdgeSupport, int level, eid_t *next, BoolArray <word_type> &InNext,
                          long *nextTail, BoolArray <word_type> &processed_, Edge *edgeIdtoEdge,
                          eid_t *off_end, bool *is_vertex_updated, IterHelper &iter_helper,
                          volatile eid_t &global_v_buff_size);

extern void invoke_tc_bmp_gpu(graph_t *g, int *edge_sup);

/*
 * F requires a callable or a functor with signature `void (int)`
 *
 * Driver of the peeling computation: triangle counting (GPU path for graphs
 * whose edge count fits in 32 bits, CPU otherwise), then level-by-level
 * peeling with optional graph shrinking and a cost model that chooses between
 * peeling (f) and re-counting triangles per sub-level. Runs inside a single
 * OpenMP parallel region; all `omp single` blocks rely on its implicit
 * barriers for synchronization. Returns the level reached when offloading
 * stops the loop (ret_level).
 */
template<typename F>
int AbstractPKT(graph_t *g, int *&EdgeSupport, Edge *&edgeIdToEdge, IterHelper &iter_helper, F f) {
    Timer malloc_timer;
    long numEdges = g->m / 2;
    auto max_omp_threads = omp_get_max_threads();
    log_info("Max Threads: %d", max_omp_threads);
#pragma omp parallel num_threads(max_omp_threads)
    {
        iter_helper.MemSetIterVariables(max_omp_threads);
    }
    log_info("Malloc & MemSet Time: %.6lfs", malloc_timer.elapsed());

    vector<double> shrink_time_lst;
    Timer iter_timer;
    Timer comp_timer;
    size_t iter_num = 0;
    size_t local_iter_num = 0;
    volatile eid_t global_v_buff_size = 0;
    size_t num_of_shrinks = 0;
    vector<int> tc_level;
    vector<double> tc_level_time;
    double init_tc_time = 0;
    double penalty_tc_time = 0;
    auto ret_level = 0;
#pragma omp parallel
    {
        auto is_end = false;
        size_t acc_process_num = 0;
        double acc_time = 0;
        // TC.
        IterStatTLS iter_stat_tls;
        // if (sizeof(eid_t) == sizeof(uint32_t)) {
        if (iter_helper.g->m < UINT32_MAX) {
#pragma omp single
            {
                invoke_tc_bmp_gpu(iter_helper.g, *iter_helper.edge_sup_ptr_);
            }
            iter_stat_tls.triTime = iter_stat_tls.local_timer.elapsed_and_reset();
        } else {
            iter_helper.ComputeTriSupport(iter_stat_tls);
        }
#pragma omp single
        {
            extern double tc_time;
            tc_time = iter_stat_tls.triTime;
        }
#pragma omp single
        {
            init_tc_time = iter_stat_tls.triTime;
            iter_timer.reset();
        }
        // Compute Truss.
        auto *local_buffer = (vid_t *) malloc(sizeof(vid_t) * V_BUFF_SIZE);
        int level = 0;
        long acc_deleted = 0;
        long todo = numEdges;
        while (todo > 0 && !is_end) {
            // 1st: Synchronization.
#pragma omp single
            {
                iter_stat_tls.PrintIterStat(iter_timer, todo, numEdges, level, iter_num, local_iter_num);
            }
            iter_stat_tls.ResetLocalTime();
            iter_stat_tls.RecordSyncTime();
            // Offloading
#ifdef SWITCH_EDGE_NUM
            if (todo < SWITCH_EDGE_NUM && todo > 0) {
#else
            if (todo < 200000000 && todo > 0) {
#endif
                iter_helper.ShrinkCSREID(&global_v_buff_size, local_buffer);
                iter_helper.CompactCSREID();
                iter_helper.ShrinkEdgeList();
                // Offload and Return.
                is_end = true;
#pragma omp single
                {
                    g->m = iter_helper.num_edges_ * 2;
                    ret_level = level;
                }
            } else {
                // 2nd: Scanning the graph to fetch the level.
                iter_helper.SCANGraph(level);
                iter_stat_tls.RecordSCANTime();
#ifdef SHRINK_EDGE_LIST
#pragma omp single
                {
                    iter_helper.level_start_pos_[level + 1] = iter_helper.level_start_pos_[level];
                }
#endif
                // 3rd: Processing the graph (shrinking and updating supports).
                while (iter_helper.curr_tail_ > 0) {
                    // Map the curr_ to result array.
#ifdef SHRINK_EDGE_LIST
#pragma omp for
                    for (auto i = 0; i < max_omp_threads; i++) {
                        auto avg = iter_helper.curr_tail_ / max_omp_threads;
                        auto iter_beg = avg * i;
                        auto iter_end = (i == max_omp_threads - 1) ? iter_helper.curr_tail_ : avg * (i + 1);
                        auto pos_off = iter_helper.level_start_pos_[level + 1];
                        for (auto iter = iter_beg; iter < iter_end; iter++) {
                            // map operation.
                            iter_helper.edge_offsets_level_[pos_off + iter] =
                                    iter_helper.edge_off_org_[iter_helper.curr_[iter]];
                        }
                    }
#endif
#pragma omp single
                    {
                        iter_helper.level_start_pos_[level + 1] += iter_helper.curr_tail_;
                    }
                    todo = todo - iter_helper.curr_tail_;
                    iter_stat_tls.RecordQueueSize(iter_helper.curr_tail_);
                    // All of them being the last level.
                    if (todo == 0) {
                        // No need to process but need to copy the results back.
                        level = level + 1;
                        break;
                    }
                    // 3.1: Optional shrinking graph. (Move to here to maximally shrink the graph).
                    if (acc_deleted > numEdges / 200) {
#pragma omp barrier
                        Timer shrink_timer;
                        iter_helper.ShrinkCSREID(&global_v_buff_size, local_buffer);
                        acc_deleted = 0;
#pragma omp single
                        {
                            iter_stat_tls.RecordShrinkNum(num_of_shrinks);
                            shrink_time_lst.emplace_back(shrink_timer.elapsed());
                        }
                    }
                    iter_stat_tls.RecordShrinkTime();
                    if (level == 0) {
                        iter_helper.ProcessSupportZeros();
                    } else {
                        // 3.2: Real Processing (updating supports).
                        // Cost model: peel with f(level) when that is cheaper than
                        // re-counting triangles for the remaining edges.
                        size_t task_size = iter_helper.curr_tail_ * (size_t)(level + 1);
                        size_t left_edge_size = todo;
                        double estimated_tc_time = left_edge_size / (g->m / 2.0) * init_tc_time + penalty_tc_time;
                        double estimated_process_throughput = 2.0 * pow(10, 9);
                        double estimated_peel_time = task_size / estimated_process_throughput;
                        if (estimated_tc_time > estimated_peel_time) {
                            auto to_delete = iter_helper.curr_tail_;
                            f(level);
                            acc_process_num += task_size;
                            acc_deleted += to_delete;
                        } else {
#pragma omp single
                            {
                                log_info("Estimated TC Time: %.9lfs, Peel Time: %.9lfs, %.9lf G/s",
                                         estimated_tc_time, estimated_peel_time,
                                         acc_process_num / acc_time / pow(10, 9));
                                tc_level.emplace_back(level);
                                log_info("!!!TriCnt!!!, Task-Size: %'zu, TC-Cnt/50: %'zu", task_size, tc_cnt / 50);
                            }
                            Timer tc_timer;
                            TriCntDetailSubLevel(g, iter_helper.curr_, iter_helper.in_curr_,
                                                 iter_helper.curr_tail_, *iter_helper.edge_sup_ptr_, level,
                                                 iter_helper.next_, iter_helper.in_next_,
                                                 &iter_helper.next_tail_, iter_helper.processed_,
                                                 *iter_helper.edge_lst_ptr_, iter_helper.off_end_,
                                                 iter_helper.is_vertex_updated_, iter_helper,
                                                 global_v_buff_size);
                            acc_deleted = 0;
#pragma omp single
                            {
                                auto cost = tc_timer.elapsed();
                                // If the estimate was too optimistic, penalize future TC choices.
                                if (estimated_tc_time * 1.2 < cost) {
                                    penalty_tc_time += cost - estimated_tc_time;
                                    log_info("Penalty TC-Time: %.9lfs", penalty_tc_time);
                                }
                                tc_level_time.emplace_back(cost);
                            }
                        }
                    }
                    // 3.3: Swap Queues.
#pragma omp single
                    {
                        iter_helper.SwapCurNextQueue();
                        iter_stat_tls.RecordIterNum(iter_num, local_iter_num);
                    }
#pragma omp barrier
                    iter_stat_tls.RecordProcessTime();
                }
                level = level + 1;
#pragma omp barrier
                // End of Iterative Peeling for this Level.
            }
        }
        // The end.
#pragma omp single
        {
            iter_helper.level_size_ = level;
            log_info("Total Levels: %d", iter_helper.level_size_);
            log_trace("Last Level Finished: %d, Elapsed Time: %.9lfs, Left/Total: %'lld/%'lld, "
                      "Local/Global-Iter#: %zu/%zu", level - 1, iter_timer.elapsed_and_reset(), todo, numEdges,
                      local_iter_num, iter_num);
            iter_stat_tls.PrintFinalStat(level, num_of_shrinks);
            stringstream ss;
            ss << tc_level << ", Time: " << tc_level_time;
            log_trace("TC-levels: %s", ss.str().c_str());
            stringstream ss2;
            ss2 << shrink_time_lst;
            log_trace("Shrink Time List: %s", ss2.str().c_str());
        }
        free(local_buffer);
    } //End of parallel region
    log_info("Total computation cost: %.9lfs", comp_timer.elapsed_and_reset());
    return ret_level;
}
GB_binop__max_fp64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__max_fp64
// A.*B function (eWiseMult):       GB_AemultB__max_fp64
// A*D function (colscale):         GB_AxD__max_fp64
// D*A function (rowscale):         GB_DxB__max_fp64
// C+=B function (dense accum):     GB_Cdense_accumB__max_fp64
// C+=b function (dense accum):     GB_Cdense_accumb__max_fp64
// C+=A+B function (dense ewise3):  GB_Cdense_ewise3_accum__max_fp64
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__max_fp64
// C=scalar+B                       GB_bind1st__max_fp64
// C=scalar+B'                      GB_bind1st_tran__max_fp64
// C=A+scalar                       GB_bind2nd__max_fp64
// C=A'+scalar                      GB_bind2nd_tran__max_fp64

// C type:   double
// A type:   double
// B,b type: double
// BinaryOp: cij = fmax (aij, bij)

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    double bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = fmax (x, y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MAX || GxB_NO_FP64 || GxB_NO_MAX_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB_Cdense_ewise3_accum__max_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__max_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__max_fp64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__max_fp64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable duplicated return below is a code-generator artifact.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__max_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__max_fp64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

#undef  GB_FREE_ALL
#define GB_FREE_ALL \
{ \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}

GrB_Info GB_AaddB__max_fp64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__max_fp64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__max_fp64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        double bij = Bx [p] ;
        Cx [p] = fmax (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__max_fp64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        double aij = Ax [p] ;
        Cx [p] = fmax (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = Ax [pA] ; \
    Cx [pC] = fmax (x, aij) ; \
}

GrB_Info GB_bind1st_tran__max_fp64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
}

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = Ax [pA] ; \
    Cx [pC] = fmax (aij, y) ; \
}

GrB_Info GB_bind2nd_tran__max_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
vector.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Member functions for hypre_Vector class.
 *
 *****************************************************************************/

#include "seq_mv.h"
#include "_hypre_utilities.hpp"
//RL: TODO vector_device.c, include cuda there

/*--------------------------------------------------------------------------
 * hypre_SeqVectorCreate
 *
 * Allocate a hypre_Vector header on the host with NULL data.  The data
 * array itself is allocated later by hypre_SeqVectorInitialize.  The
 * vector's memory location defaults to the global hypre handle's location.
 *--------------------------------------------------------------------------*/

hypre_Vector *
hypre_SeqVectorCreate( HYPRE_Int size )
{
   hypre_Vector *vector;

   vector = hypre_CTAlloc(hypre_Vector, 1, HYPRE_MEMORY_HOST);

   hypre_VectorData(vector) = NULL;
   hypre_VectorSize(vector) = size;

   hypre_VectorNumVectors(vector) = 1;
   hypre_VectorMultiVecStorageMethod(vector) = 0;

   /* set defaults */
   hypre_VectorOwnsData(vector) = 1;

   hypre_VectorMemoryLocation(vector) = hypre_HandleMemoryLocation(hypre_handle());

   return vector;
}

/*--------------------------------------------------------------------------
 * hypre_SeqMultiVectorCreate
 *
 * Same as hypre_SeqVectorCreate, but with num_vectors columns.
 *--------------------------------------------------------------------------*/

hypre_Vector *
hypre_SeqMultiVectorCreate( HYPRE_Int size, HYPRE_Int num_vectors )
{
   hypre_Vector *vector = hypre_SeqVectorCreate(size);
   hypre_VectorNumVectors(vector) = num_vectors;
   return vector;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorDestroy
 *
 * Free the data array (only if this vector owns it) in the vector's own
 * memory location, then free the header (always host memory).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorDestroy( hypre_Vector *vector )
{
   HYPRE_Int ierr=0;

   if (vector)
   {
      HYPRE_MemoryLocation memory_location = hypre_VectorMemoryLocation(vector);

      if ( hypre_VectorOwnsData(vector) )
      {
         hypre_TFree(hypre_VectorData(vector), memory_location);
      }

      hypre_TFree(vector, HYPRE_MEMORY_HOST);
   }

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorInitialize
 *
 * Allocate the data array (num_vectors * size entries, zero-initialized)
 * in the given memory location, unless data was already attached.  Also
 * sets the multivector strides:
 *   method 0: column-major storage  (vecstride = size, idxstride = 1)
 *   method 1: interleaved storage   (vecstride = 1, idxstride = num_vectors)
 * Returns nonzero for an unknown storage method.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorInitialize_v2( hypre_Vector *vector, HYPRE_MemoryLocation memory_location )
{
   HYPRE_Int  size = hypre_VectorSize(vector);
   HYPRE_Int  ierr = 0;
   HYPRE_Int  num_vectors = hypre_VectorNumVectors(vector);
   HYPRE_Int  multivec_storage_method = hypre_VectorMultiVecStorageMethod(vector);

   hypre_VectorMemoryLocation(vector) = memory_location;

   /* Caveat: for pre-existing data, the memory location must be guaranteed
    * to be consistent with `memory_location'
    * Otherwise, mismatches will exist and problems will be encountered
    * when being used, and freed */
   if ( !hypre_VectorData(vector) )
   {
      hypre_VectorData(vector) = hypre_CTAlloc(HYPRE_Complex, num_vectors*size, memory_location);
   }

   if ( multivec_storage_method == 0 )
   {
      hypre_VectorVectorStride(vector) = size;
      hypre_VectorIndexStride(vector) = 1;
   }
   else if ( multivec_storage_method == 1 )
   {
      hypre_VectorVectorStride(vector) = 1;
      hypre_VectorIndexStride(vector) = num_vectors;
   }
   else
   {
      ++ierr;
   }

   return ierr;
}

/* Initialize in the vector's current memory location. */
HYPRE_Int
hypre_SeqVectorInitialize( hypre_Vector *vector )
{
   HYPRE_Int ierr;

   ierr = hypre_SeqVectorInitialize_v2( vector, hypre_VectorMemoryLocation(vector) );

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorSetDataOwner
 *
 * owns_data == 0 means the data pointer is borrowed and will not be freed
 * by hypre_SeqVectorDestroy.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorSetDataOwner( hypre_Vector *vector, HYPRE_Int owns_data )
{
   HYPRE_Int ierr=0;

   hypre_VectorOwnsData(vector) = owns_data;

   return ierr;
}

/*--------------------------------------------------------------------------
 * ReadVector
 *
 * Read a single vector from a text file: first line is the size, followed
 * by one value per line.  The vector is created in HOST memory.
 * NOTE(review): fp is not checked for NULL after fopen — a missing file
 * dereferences NULL; multivector input is not supported (asserted below).
 *--------------------------------------------------------------------------*/

hypre_Vector *
hypre_SeqVectorRead( char *file_name )
{
   hypre_Vector  *vector;

   FILE    *fp;

   HYPRE_Complex *data;
   HYPRE_Int      size;

   HYPRE_Int      j;

   /*----------------------------------------------------------
    * Read in the data
    *----------------------------------------------------------*/

   fp = fopen(file_name, "r");

   hypre_fscanf(fp, "%d", &size);

   vector = hypre_SeqVectorCreate(size);

   hypre_VectorMemoryLocation(vector) = HYPRE_MEMORY_HOST;

   hypre_SeqVectorInitialize(vector);

   data = hypre_VectorData(vector);
   for (j = 0; j < size; j++)
   {
      hypre_fscanf(fp, "%le", &data[j]);
   }

   fclose(fp);

   /* multivector code not written yet */
   hypre_assert( hypre_VectorNumVectors(vector) == 1 );

   return vector;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorPrint
 *
 * Write the vector to a text file in the format read by
 * hypre_SeqVectorRead (multivectors get a header line and per-vector
 * sections; complex builds print "real , imag" pairs).
 * NOTE(review): assumes the data is host-accessible and fp != NULL.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorPrint( hypre_Vector *vector,
                      char         *file_name )
{
   FILE    *fp;

   HYPRE_Complex *data;
   HYPRE_Int      size, num_vectors, vecstride, idxstride;

   HYPRE_Int      i, j;
   HYPRE_Complex  value;

   HYPRE_Int      ierr = 0;

   num_vectors = hypre_VectorNumVectors(vector);
   vecstride = hypre_VectorVectorStride(vector);
   idxstride = hypre_VectorIndexStride(vector);

   /*----------------------------------------------------------
    * Print in the data
    *----------------------------------------------------------*/

   data = hypre_VectorData(vector);
   size = hypre_VectorSize(vector);

   fp = fopen(file_name, "w");

   if ( hypre_VectorNumVectors(vector) == 1 )
   {
      hypre_fprintf(fp, "%d\n", size);
   }
   else
   {
      hypre_fprintf(fp, "%d vectors of size %d\n", num_vectors, size );
   }

   if ( num_vectors>1 )
   {
      for ( j=0; j<num_vectors; ++j )
      {
         hypre_fprintf(fp, "vector %d\n", j );
         for (i = 0; i < size; i++)
         {
            value = data[ j*vecstride + i*idxstride ];
#ifdef HYPRE_COMPLEX
            hypre_fprintf(fp, "%.14e , %.14e\n",
                          hypre_creal(value), hypre_cimag(value));
#else
            hypre_fprintf(fp, "%.14e\n", value);
#endif
         }
      }
   }
   else
   {
      for (i = 0; i < size; i++)
      {
#ifdef HYPRE_COMPLEX
         hypre_fprintf(fp, "%.14e , %.14e\n",
                       hypre_creal(data[i]), hypre_cimag(data[i]));
#else
         hypre_fprintf(fp, "%.14e\n", data[i]);
#endif
      }
   }

   fclose(fp);

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorSetConstantValues
 *
 * v[i] = value for all entries (all columns of a multivector).  Uses
 * thrust::fill_n on CUDA/HIP builds, otherwise an (optionally OpenMP
 * parallel) host loop.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorSetConstantValues( hypre_Vector *v,
                                  HYPRE_Complex value )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *vector_data = hypre_VectorData(v);
   HYPRE_Int      size        = hypre_VectorSize(v);
   HYPRE_Int      ierr  = 0;

   size *= hypre_VectorNumVectors(v);

   //hypre_SeqVectorPrefetch(v, HYPRE_MEMORY_DEVICE);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (size > 0)
   {
      HYPRE_THRUST_CALL( fill_n, vector_data, size, value );
   }
#else
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(vector_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
   {
      vector_data[i] = value;
   }
#endif /* defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) */

#if defined(HYPRE_USING_GPU)
   hypre_SyncCudaComputeStream(hypre_handle());
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorSetRandomValues
 *
 * returns vector of values randomly distributed between -1.0 and +1.0
 *
 * For non-host vectors the values are generated into a temporary host
 * buffer and copied over, so the random sequence is identical regardless
 * of where the vector lives.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorSetRandomValues( hypre_Vector *v,
                                HYPRE_Int     seed )
{
   HYPRE_Complex *vector_data = hypre_VectorData(v);
   HYPRE_Int      size        = hypre_VectorSize(v);
   HYPRE_Int      i;
   HYPRE_Int      ierr  = 0;
   hypre_SeedRand(seed);

   size *= hypre_VectorNumVectors(v);

   if (hypre_GetActualMemLocation(hypre_VectorMemoryLocation(v)) == hypre_MEMORY_HOST)
   {
      /* RDF: threading this loop may cause problems because of hypre_Rand() */
      for (i = 0; i < size; i++)
      {
         vector_data[i] = 2.0 * hypre_Rand() - 1.0;
      }
   }
   else
   {
      HYPRE_Complex *h_data = hypre_TAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
      for (i = 0; i < size; i++)
      {
         h_data[i] = 2.0 * hypre_Rand() - 1.0;
      }
      hypre_TMemcpy(vector_data, h_data, HYPRE_Complex, size,
                    hypre_VectorMemoryLocation(v), HYPRE_MEMORY_HOST);
      hypre_TFree(h_data, HYPRE_MEMORY_HOST);
   }

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorCopy
 * copies data from x to y
 * if size of x is larger than y only the first size_y elements of x are
 * copied to y
 *
 * hypre_TMemcpy handles any host/device combination of source and
 * destination memory locations.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorCopy( hypre_Vector *x,
                     hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Int ierr = 0;

   /* copy min(size_x, size_y) entries per vector */
   size_t size = hypre_min( hypre_VectorSize(x), hypre_VectorSize(y) ) * hypre_VectorNumVectors(x);

   hypre_TMemcpy( hypre_VectorData(y),
                  hypre_VectorData(x),
                  HYPRE_Complex,
                  size,
                  hypre_VectorMemoryLocation(y),
                  hypre_VectorMemoryLocation(x) );

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorCloneDeep
 * Returns a complete copy of x - a deep copy, with its own copy of the data.
 *--------------------------------------------------------------------------*/

hypre_Vector*
hypre_SeqVectorCloneDeep_v2( hypre_Vector *x, HYPRE_MemoryLocation memory_location )
{
   HYPRE_Int      size          = hypre_VectorSize(x);
   HYPRE_Int      num_vectors   = hypre_VectorNumVectors(x);

   hypre_Vector *y = hypre_SeqMultiVectorCreate( size, num_vectors );

   /* replicate layout before allocating/copying data */
   hypre_VectorMultiVecStorageMethod(y) = hypre_VectorMultiVecStorageMethod(x);
   hypre_VectorVectorStride(y) = hypre_VectorVectorStride(x);
   hypre_VectorIndexStride(y) = hypre_VectorIndexStride(x);

   hypre_SeqVectorInitialize_v2(y, memory_location);
   hypre_SeqVectorCopy( x, y );

   return y;
}

/* Deep copy in x's own memory location. */
hypre_Vector*
hypre_SeqVectorCloneDeep( hypre_Vector *x )
{
   return hypre_SeqVectorCloneDeep_v2(x, hypre_VectorMemoryLocation(x));
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorCloneShallow
 * Returns a complete copy of x - a shallow copy, pointing the data of x
 *
 * The clone does not own the data (owns_data = 0), so destroying it leaves
 * x's data intact.
 *--------------------------------------------------------------------------*/

hypre_Vector *
hypre_SeqVectorCloneShallow( hypre_Vector *x )
{
   HYPRE_Int      size          = hypre_VectorSize(x);
   HYPRE_Int      num_vectors   = hypre_VectorNumVectors(x);

   hypre_Vector * y = hypre_SeqMultiVectorCreate( size, num_vectors );

   hypre_VectorMultiVecStorageMethod(y) = hypre_VectorMultiVecStorageMethod(x);
   hypre_VectorVectorStride(y) = hypre_VectorVectorStride(x);
   hypre_VectorIndexStride(y) = hypre_VectorIndexStride(x);

   hypre_VectorMemoryLocation(y) = hypre_VectorMemoryLocation(x);

   hypre_VectorData(y) = hypre_VectorData(x);
   hypre_SeqVectorSetDataOwner( y, 0 );
   /* data pointer already set, so this only fixes up the strides */
   hypre_SeqVectorInitialize(y);

   return y;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorScale
 *
 * y = alpha * y.  Short-circuits alpha == 1 (no-op) and alpha == 0
 * (constant fill).  Uses cuBLAS dscal or thrust on device builds.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorScale( HYPRE_Complex alpha,
                      hypre_Vector *y )
{
   /* special cases */
   if (alpha == 1.0)
   {
      return 0;
   }

   if (alpha == 0.0)
   {
      return hypre_SeqVectorSetConstantValues(y, 0.0);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(y);
   HYPRE_Int      ierr = 0;

   size *= hypre_VectorNumVectors(y);

   //hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
#if defined(HYPRE_USING_CUBLAS)
   HYPRE_CUBLAS_CALL( cublasDscal(hypre_HandleCublasHandle(hypre_handle()),
                                  size, &alpha, y_data, 1) );
#else
   HYPRE_THRUST_CALL( transform, y_data, y_data + size, y_data, alpha * _1 );
#endif
#else
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
   {
      y_data[i] *= alpha;
   }
#endif /* defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) */

#if defined(HYPRE_USING_GPU)
   hypre_SyncCudaComputeStream(hypre_handle());
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorAxpy
 *
 * y = alpha * x + y.  Uses cuBLAS daxpy or thrust on device builds.
 * Sizes are taken from x; assumes y is at least as large.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorAxpy( HYPRE_Complex alpha,
                     hypre_Vector *x,
                     hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Int      ierr = 0;

   size *= hypre_VectorNumVectors(x);

   //hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
   //hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
#if defined(HYPRE_USING_CUBLAS)
   HYPRE_CUBLAS_CALL( cublasDaxpy(hypre_HandleCublasHandle(hypre_handle()),
                                  size, &alpha, x_data, 1, y_data, 1) );
#else
   HYPRE_THRUST_CALL( transform, x_data, x_data + size, y_data, y_data, alpha * _1 + _2 );
#endif
#else
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, x_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
   {
      y_data[i] += alpha * x_data[i];
   }
#endif /* defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) */

#if defined(HYPRE_USING_GPU)
   hypre_SyncCudaComputeStream(hypre_handle());
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return ierr;
}

/* y = y + x ./ b
 * (element-wise divide-and-add; size taken from b)
 * NOTE(review): on device builds the execution policy is hard-wired to
 * HYPRE_EXEC_DEVICE (see the commented-out hypre_GetExecPolicy2 call). */
HYPRE_Int
hypre_SeqVectorElmdivpy( hypre_Vector *x,
                         hypre_Vector *b,
                         hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *b_data = hypre_VectorData(b);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(b);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_VectorMemoryLocation(x), hypre_VectorMemoryLocation(b) );
   //RL: TODO back to hypre_GetExecPolicy2 later
   HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE;
   if (exec == HYPRE_EXEC_DEVICE)
   {
      //TODO
      //hypre_SeqVectorElmdivpyDevice(x, b, y);
      /*
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(u_data,v_data,l1_norms)
#endif
      */
      hypreDevice_IVAXPY(size, b_data, x_data, y_data);
   }
   else
#endif
   {
      HYPRE_Int i;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         y_data[i] += x_data[i] / b_data[i];
      }
   }

#if defined(HYPRE_USING_GPU)
   hypre_SyncCudaComputeStream(hypre_handle());
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return hypre_error_flag;
}

/* y[i] += x[i] / b[i] where marker[i] == marker_val
 * (masked variant of hypre_SeqVectorElmdivpy) */
HYPRE_Int
hypre_SeqVectorElmdivpyMarked( hypre_Vector *x,
                               hypre_Vector *b,
                               hypre_Vector *y,
                               HYPRE_Int    *marker,
                               HYPRE_Int     marker_val)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *b_data = hypre_VectorData(b);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(b);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_VectorMemoryLocation(x),
                                                      hypre_VectorMemoryLocation(b) );
   if (exec == HYPRE_EXEC_DEVICE)
   {
      hypreDevice_IVAXPYMarked(size, b_data, x_data, y_data, marker, marker_val);
   }
   else
#endif
   {
      HYPRE_Int i;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         if (marker[i] == marker_val)
         {
            y_data[i] += x_data[i] / b_data[i];
         }
      }
   }

#if defined(HYPRE_USING_GPU)
   hypre_SyncCudaComputeStream(hypre_handle());
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorInnerProd
 *
 * result = sum_i conj(y[i]) * x[i].  Uses cuBLAS ddot or thrust on device
 * builds (real-valued only: complex device builds hit the #error below).
 *--------------------------------------------------------------------------*/

HYPRE_Real
hypre_SeqVectorInnerProd( hypre_Vector *x,
                          hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x);

   HYPRE_Real     result = 0.0;

   size *= hypre_VectorNumVectors(x);

   //hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
   //hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
#ifndef HYPRE_COMPLEX
#if defined(HYPRE_USING_CUBLAS)
   HYPRE_CUBLAS_CALL( cublasDdot(hypre_HandleCublasHandle(hypre_handle()),
                                 size, x_data, 1, y_data, 1, &result) );
#else
   result = HYPRE_THRUST_CALL( inner_product, x_data, x_data + size, y_data, 0.0 );
#endif
#else /* TODO */
#error "Complex inner product"
#endif
#else /* #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) */
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) reduction(+:result) is_device_ptr(y_data,x_data) map(result)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:result) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
   {
      result += hypre_conj(y_data[i]) * x_data[i];
   }
#endif /* defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) */

#if defined(HYPRE_USING_GPU)
   hypre_SyncCudaComputeStream(hypre_handle());
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return result;
}

//TODO

/*--------------------------------------------------------------------------
 * hypre_VectorSumElts:
 * Returns the sum of all vector elements.
 *
 * NOTE(review): host loop only — presumably expects host-resident data.
 *--------------------------------------------------------------------------*/

HYPRE_Complex
hypre_SeqVectorSumElts( hypre_Vector *vector )
{
   HYPRE_Complex  sum = 0;
   HYPRE_Complex *data = hypre_VectorData( vector );
   HYPRE_Int      size = hypre_VectorSize( vector );
   HYPRE_Int      i;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE
#endif
   for ( i=0; i<size; ++i ) sum += data[i];

   return sum;
}

/* Prefetch the vector's data to the given memory location (unified-memory
 * builds only; otherwise a no-op).  Returns 1 if the vector does not live
 * in device (unified) memory. */
HYPRE_Int
hypre_SeqVectorPrefetch( hypre_Vector *x, HYPRE_MemoryLocation memory_location)
{
   HYPRE_Int ierr = 0;
#ifdef HYPRE_USING_UNIFIED_MEMORY
   if (hypre_VectorMemoryLocation(x) != HYPRE_MEMORY_DEVICE)
   {
      /* hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! CUDA Prefetch with non-unified momory\n");*/
      return 1;
   }

   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Int      size   = hypre_VectorSize(x) * hypre_VectorNumVectors(x);

   if (size == 0)
   {
      return ierr;
   }

   hypre_MemPrefetch(x_data, sizeof(HYPRE_Complex)*size, memory_location);
#endif

   return ierr;
}

#if 0
/* y[i] = max(alpha*x[i], beta*y[i])
 * (disabled: note the host fallback below uses += rather than =) */
HYPRE_Int
hypre_SeqVectorMax( HYPRE_Complex alpha,
                    hypre_Vector *x,
                    HYPRE_Complex beta,
                    hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Int      ierr = 0;

   size *= hypre_VectorNumVectors(x);

   //hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
   //hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);

   thrust::maximum<HYPRE_Complex> mx;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_THRUST_CALL( transform,
                      thrust::make_transform_iterator(x_data,        alpha * _1),
                      thrust::make_transform_iterator(x_data + size, alpha * _1),
                      thrust::make_transform_iterator(y_data,        beta  * _1),
                      y_data,
                      mx );
#else
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, x_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
   {
      y_data[i] += hypre_max(alpha * x_data[i], beta * y_data[i]);
   }
#endif /* defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) */

   hypre_SyncCudaComputeStream(hypre_handle());

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return ierr;
}
#endif
uts_shm.c
/*
 * ---- The Unbalanced Tree Search (UTS) Benchmark ----
 *
 * Copyright (c) 2010 See AUTHORS file for copyright holders
 *
 * This file is part of the unbalanced tree search benchmark.  This
 * project is licensed under the MIT Open Source license.  See the LICENSE
 * file for copyright and licensing information.
 *
 * UTS is a collaborative project between researchers at the University of
 * Maryland, the University of North Carolina at Chapel Hill, and the Ohio
 * State University.  See AUTHORS file for more information.
 *
 * One each node, a set of MAX_NUM_THREADS steal stacks is allocated in the
 * symmetric heap. The shmem_my_pe()-th member of this set is the stack
 * associated with this PE, which is primarily a set of node slots (Node) that
 * is the backing data of the stack with length MAXSTACKDEPTH and a top pointer
 * that is the index of the lowest empty slot in the stack. The stack
 * for PE 0 is initialized with a single Node before execution really starts.
 *
 * Each stack for each PE is separated in to three sections: the local section,
 * the non-local and non-stolen section, and the non-local and stolen section.
 * The non-local and stolen section is at the base, from index 0 to
 * ss.sharedStart. This region represents the locally created nodes that have
 * been stolen from the current PE by another PE. The non-local and non-stolen
 * section goes from ss.sharedStart to ss.local. This section contains nodes
 * that were locally created and are eligible for stealing, but have not been
 * stolen yet. These nodes may also be reclaimed in to the local section by
 * this PE if it needs more work. The local section is above that, from
 * ss.local to ss.top and contains locally created work that only this PE has
 * access to. If work-stealing is enabled and the size of the local section is
 * too big (as defined by chunkSize), then the local region is reduced by
 * incrementing ss.local to allow other PEs to steal work from this PE.
 *
 * During the parallel tree search, we loop until there are no more local nodes
 * left. Every time we visit a node we generate a certain number of children
 * for that node and push each of them on to the stack. Once we run out of
 * local work, we try to acquire locally-created work from the non-local region
 * by decrementing local (ss_acquire). If that fails, we then must try to find
 * a remote PE to steal from and grab a chunk of nodes from it using OpenSHMEM
 * copies (ss_steal) and placing them on the local stack of our PE. If this
 * succeeds, we loop back around to processing these nodes.
 *
 * If we are unable to find work locally or remotely, we enter a cancellable
 * barrier that waits for all PEs to enter it. This barrier is cancelled if any
 * PEs release nodes for stealing. If the barrier completes successfully with
 * all nodes having entered it, the search exits.
 *
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include "uts.h"

/***********************************************************
 *                                                         *
 *  Compiler Type (these flags are set by at compile time) *
 *  (default) ANSI C compiler - sequential execution       *
 *  (_OPENMP) OpenMP enabled C compiler                    *
 *  (__UPC__) UPC compiler                                 *
 *  (_SHMEM)  Cray Shmem                                   *
 *  (__PTHREADS__) Pthreads multithreaded execution        *
 *                                                         *
 ***********************************************************/

/* Each programming-model section below defines the same portability macros
 * (SHARED, LOCK_T, GET_THREAD_NUM, SMEMCPY, ALLOC, BARRIER, ...) so the rest
 * of the file is model-agnostic. */

/**** OpenMP Definitions ****/
#ifdef _OPENMP
#include <omp.h>
#define PARALLEL         1
#define COMPILER_TYPE    1
#define SHARED
#define SHARED_INDEF
#define VOLATILE         volatile
#define MAX_THREADS       32
#define LOCK_T           omp_lock_t
#define GET_NUM_THREADS  omp_get_num_threads()
#define GET_THREAD_NUM   omp_get_thread_num()
#define SET_LOCK(zlk)    omp_set_lock(zlk)
#define UNSET_LOCK(zlk)  omp_unset_lock(zlk)
#define INIT_LOCK(zlk)   zlk=omp_global_lock_alloc()
#define INIT_SINGLE_LOCK(zlk) zlk=omp_global_lock_alloc()
#define SMEMCPY          memcpy
#define ALLOC            malloc
#define BARRIER

// OpenMP helper function to match UPC lock allocation semantics
omp_lock_t * omp_global_lock_alloc() {
  // extra 128 bytes of padding after the lock
  omp_lock_t *lock = (omp_lock_t *) malloc(sizeof(omp_lock_t) + 128);
  omp_init_lock(lock);
  return lock;
}

/**** UPC Definitions ****/
#elif defined(__UPC__)
#include <upc.h>
#define PARALLEL         1
#define COMPILER_TYPE    2
#define SHARED           shared
#define SHARED_INDEF     shared [0]
#define VOLATILE         strict
#define MAX_THREADS       (THREADS)
#define LOCK_T           upc_lock_t
#define GET_NUM_THREADS  (THREADS)
#define GET_THREAD_NUM   (MYTHREAD)
#define SET_LOCK(zlk)    upc_lock(zlk)
#define UNSET_LOCK(zlk)  upc_unlock(zlk)
#define INIT_LOCK(zlk)   zlk=upc_global_lock_alloc()
#define INIT_SINGLE_LOCK(zlk) zlk=upc_all_lock_alloc()
#define SMEMCPY          upc_memget
#define ALLOC            upc_alloc
#define BARRIER          upc_barrier;

/**** Shmem Definitions ****/
#elif defined(_SHMEM)
#include <shmem.h>
#define PARALLEL         1
#define COMPILER_TYPE    3
#define SHARED
#define SHARED_INDEF
#define VOLATILE         volatile
#define MAX_THREADS      1024
#define LOCK_T           long
#define GET_NUM_THREADS  shmem_n_pes()
#define GET_THREAD_NUM   shmem_my_pe()
#define SET_LOCK(zlk)    shmem_set_lock(zlk)
#define UNSET_LOCK(zlk)  shmem_clear_lock(zlk)
#define INIT_LOCK(zlk)   zlk = shmem_global_lock_alloc()
#define INIT_SINGLE_LOCK(zlk) zlk = shmem_global_lock_alloc()
#define SMEMCPY         shmem_getmem
// Shmem's get has different semantics from memcpy():
//   void shmem_getmem(void *target, const void *source, size_t len, int pe)
#define ALLOC           shmem_malloc
#define BARRIER         shmem_barrier_all();

// Shmem helper function to match UPC lock allocation semantics
LOCK_T * shmem_global_lock_alloc() {
    // lock lives in the symmetric heap; all PEs sync before first use
    LOCK_T *lock = (LOCK_T *) shmem_malloc(sizeof(LOCK_T));
    *lock = 0;
    shmem_barrier_all();
    return lock;
}

#define GET(target,source,from_id) shmem_int_get(&(target),&(source),1,from_id)
#define PUT(target,source,to_id) shmem_int_put(&(target),&(source),1,to_id)
/* Push a single int to every other PE, starting with our right neighbor. */
#define PUT_ALL(a,b) \
do { \
int _iter, _node; \
for (_iter = 1; _iter < GET_NUM_THREADS; _iter++) { \
_node = (GET_THREAD_NUM + _iter) % GET_NUM_THREADS; \
shmem_int_put((int *)&a,(int *)&b,1,_node); \
} \
} while(0)

/**** Pthreads Definitions ****/
#elif defined(__PTHREADS__)
#include <pthread.h>
#define PARALLEL         1
#define COMPILER_TYPE    4
#define SHARED
#define SHARED_INDEF
#define VOLATILE         volatile
#define MAX_THREADS       128
#define LOCK_T           pthread_mutex_t
#define GET_NUM_THREADS  pthread_num_threads
#define GET_THREAD_NUM   *(int*)pthread_getspecific(pthread_thread_num)
#define SET_LOCK(zlk)    pthread_mutex_lock(zlk)
#define UNSET_LOCK(zlk)  pthread_mutex_unlock(zlk)
#define INIT_LOCK(zlk)   zlk = pthread_global_lock_alloc()
#define INIT_SINGLE_LOCK(zlk)  zlk = pthread_global_lock_alloc()
#define SMEMCPY          memcpy
#define ALLOC            malloc
#define BARRIER

int pthread_num_threads = 1;          // Command line parameter - default to 1
pthread_key_t pthread_thread_num;     // Key to store each thread's ID

/* helper function to match UPC lock allocation semantics */
LOCK_T * pthread_global_lock_alloc() {
    LOCK_T *lock = (LOCK_T *) malloc(sizeof(LOCK_T));
    pthread_mutex_init(lock, NULL);
    return lock;
}

/**** Default Sequential Definitions ****/
#else
#define PARALLEL         0
#define COMPILER_TYPE    0
#define SHARED
#define SHARED_INDEF
#define VOLATILE
#define MAX_THREADS 1
#define LOCK_T           void
#define GET_NUM_THREADS  1
#define GET_THREAD_NUM   0
#define SET_LOCK(zlk)
#define UNSET_LOCK(zlk)
#define INIT_LOCK(zlk)
#define INIT_SINGLE_LOCK(zlk)
#define SMEMCPY          memcpy
#define ALLOC            malloc
#define BARRIER

#endif /* END Par. Model Definitions */


/***********************************************************
 *  Parallel execution parameters                          *
 ***********************************************************/

int doSteal   = PARALLEL; // 1 => use work stealing
int chunkSize = 20;       // number of nodes to move to/from shared area
int cbint     = 64;       // Cancellable barrier polling interval
int pollint   = 1;        // BUPC Polling interval

#ifdef __BERKELEY_UPC__
/* BUPC nonblocking I/O Handles */
bupc_handle_t cb_handle       = BUPC_COMPLETE_HANDLE;
const int     local_cb_cancel = 1;
#endif


/***********************************************************
 * Tree statistics (if selected via UTS_STAT)              *
 *   compute overall size and imbalance metrics            *
 *   and histogram size and imbalance per level            *
 ***********************************************************/
#ifdef UTS_STAT

/*
 * Check that we are not being asked to compile parallel with stats.
 * Parallel stats collection is presently not supported.
 */
#if PARALLEL
#error "ERROR: Parallel stats collection is not supported!"
#endif

#define MAXHISTSIZE      2000  // max tree depth in histogram
int    stats     = 1;
int    unbType   = 1;
int    maxHeight = 0;         // maximum depth of tree
double maxImb    = 0;         // maximum imbalance
double minImb    = 1;
double treeImb   =-1;         // Overall imbalance, undefined

int    hist[MAXHISTSIZE+1][2];      // average # nodes per level
double unbhist[MAXHISTSIZE+1][3];   // average imbalance per level

int    *rootSize;             // size of the root's children
double *rootUnb;              // imbalance of root's children

/* Tseng statistics */
int    totalNodes = 0;
double imb_max    = 0;   // % of work in largest child (ranges from 100/n to 100%)
double imb_avg    = 0;
double imb_devmaxavg     = 0; // ( % of work in largest child ) - ( avg work )
double imb_normdevmaxavg = 0; // ( % of work in largest child ) - ( avg work ) / ( 100% - avg work )
#else
int stats   = 0;
int unbType = -1;
#endif


/***********************************************************
 *  Execution Tracing                                      *
 ***********************************************************/

/* session states used to index per-thread timing arrays */
#define SS_WORK    0
#define SS_SEARCH  1
#define SS_IDLE    2
#define SS_OVH     3
#define SS_CBOVH   4
#define SS_NSTATES 5

/* session record for session visualization */
struct sessionRecord_t {
  double startTime, endTime;
};
typedef struct sessionRecord_t SessionRecord;

/* steal record for steal visualization */
struct stealRecord_t {
  long int nodeCount;           /* count nodes generated during the session */
  int victimThread;             /* thread from which we stole the work  */
};
typedef struct stealRecord_t StealRecord;

/* Store debugging and trace data */
struct metaData_t {
  SessionRecord sessionRecords[SS_NSTATES][20000];   /* session time records */
  StealRecord stealRecords[20000]; /* steal records */
};
typedef struct metaData_t MetaData;

/* holds text string for debugging info */
char debug_str[1000];


/***********************************************************
 * StealStack types                                        *
 ***********************************************************/

#define MAXSTACKDEPTH 500000

/* stack of nodes */
struct stealStack_t
{
  int stackSize;     /* total space avail (in number of elements) */
  int workAvail;     /* elements available for stealing */
  int sharedStart;   /* index of start of shared portion of stack */
  int local;         /* index of start of local portion */
  int top;           /* index of stack top */
  int maxStackDepth;                      /* stack stats */
  int nNodes, maxTreeDepth;               /* tree stats  */
  int nLeaves;
  int nAcquire, nRelease, nSteal, nFail;  /* steal stats */
  int wakeups, falseWakeups, nNodes_last;
  double time[SS_NSTATES], timeLast;         /* perf measurements */
  int entries[SS_NSTATES], curState;
  LOCK_T * stackLock; /* lock for manipulation of shared portion */
  Node * stack;       /* addr of actual stack of nodes in local addr space */
  SHARED_INDEF Node * stack_g; /* addr of same stack in global addr space */
#ifdef TRACE
  MetaData * md;        /* meta data used for debugging and tracing */
#endif
};
typedef struct stealStack_t StealStack;
typedef SHARED StealStack * SharedStealStackPtr;


/***********************************************************
 *  Global shared state                                    *
 ***********************************************************/

// shared access to each thread's stealStack
SHARED SharedStealStackPtr stealStack[MAX_THREADS];

// termination detection (cancellable barrier)
VOLATILE SHARED int cb_cancel;
VOLATILE SHARED int cb_count;
VOLATILE SHARED int cb_done;
LOCK_T * cb_lock;

SHARED double startTime[MAX_THREADS];


/***********************************************************
 *  UTS Implementation Hooks                               *
 ***********************************************************/

// Return a string describing this implementation
// (indexed by the COMPILER_TYPE selected above)
char * impl_getName() {
  char * name[] = {"Sequential C", "C/OpenMP", "UPC", "SHMEM", "PThreads"};
  return name[COMPILER_TYPE];
}


// construct string with all parameter settings
// (appends at offset ind in strBuf and returns the new offset)
int impl_paramsToStr(char *strBuf, int ind) {
  ind += sprintf(strBuf+ind, "Execution strategy:  ");
  if (PARALLEL) {
    ind += sprintf(strBuf+ind, "Parallel search using %d threads\n", GET_NUM_THREADS);
    if (doSteal) {
      ind += sprintf(strBuf+ind, "   Load balance by work stealing, chunk size = %d
nodes\n",chunkSize); ind += sprintf(strBuf+ind, " CBarrier Interval: %d\n", cbint); ind += sprintf(strBuf+ind, " Polling Interval: %d\n", pollint); } else ind += sprintf(strBuf+ind, " No load balancing.\n"); } else ind += sprintf(strBuf+ind, "Iterative sequential search\n"); return ind; } int impl_parseParam(char *param, char *value) { int err = 0; // Return 0 on a match, nonzero on an error switch (param[1]) { #if (PARALLEL == 1) case 'c': chunkSize = atoi(value); break; case 's': doSteal = atoi(value); if (doSteal != 1 && doSteal != 0) err = 1; break; case 'i': cbint = atoi(value); break; #ifdef __BERKELEY_UPC__ case 'I': pollint = atoi(value); break; #endif #ifdef __PTHREADS__ case 'T': pthread_num_threads = atoi(value); if (pthread_num_threads > MAX_THREADS) { printf("Warning: Requested threads > MAX_THREADS. Truncated to %d threads\n", MAX_THREADS); pthread_num_threads = MAX_THREADS; } break; #endif #else /* !PARALLEL */ #ifdef UTS_STAT case 'u': unbType = atoi(value); if (unbType > 2) { err = 1; break; } if (unbType < 0) stats = 0; else stats = 1; break; #endif #endif /* PARALLEL */ default: err = 1; break; } return err; } void impl_helpMessage() { if (PARALLEL) { printf(" -s int zero/nonzero to disable/enable work stealing\n"); printf(" -c int chunksize for work stealing\n"); printf(" -i int set cancellable barrier polling interval\n"); #ifdef __BERKELEY_UPC__ printf(" -I int set working bupc_poll() interval\n"); #endif #ifdef __PTHREADS__ printf(" -T int set number of threads\n"); #endif } else { #ifdef UTS_STAT printf(" -u int unbalance measure (-1: none; 0: min/size; 1: min/n; 2: max/n)\n"); #else printf(" none.\n"); #endif } } void impl_abort(int err) { #if defined(__UPC__) upc_global_exit(err); #elif defined(_OPENMP) exit(err); #elif defined(_SHMEM) exit(err); #else exit(err); #endif } /*********************************************************** * * * FUNCTIONS * * * ***********************************************************/ /* * StealStack * Stack of 
   nodes with sharing at the bottom of the stack
 * and exclusive access at the top for the "owning" thread
 * which has affinity to the stack's address space.
 *
 * All operations on the shared portion of the stack
 * must be guarded using the stack-specific lock.
 *
 * Elements move between the shared and exclusive
 * portion of the stack solely under control of the
 * owning thread. (ss_release and ss_acquire)
 *
 * workAvail is the count of elements in the shared
 * portion of the stack.  It may be read without
 * acquiring the stack lock, but of course its value
 * may not be accurate.  Idle threads read workAvail in
 * this speculative fashion to minimize overhead to
 * working threads.
 *
 * Elements can be stolen from the bottom of the shared
 * portion by non-owning threads.  The values are
 * reserved under lock by the stealing thread, and then
 * copied without use of the lock (currently space for
 * reserved values is never reclaimed).
 */

/* restore stack to empty state
 * Resets all region indices under the stack lock so that concurrent
 * thieves never observe a half-reset stack. */
void ss_mkEmpty(StealStack *s) {
  SET_LOCK(s->stackLock);
  s->sharedStart = 0;
  s->local = 0;
  s->top = 0;
  s->workAvail = 0;
  UNSET_LOCK(s->stackLock);
}

/* fatal error
 * Prints the calling thread's id plus the message and terminates the
 * whole process with exit code 4 (no cleanup is attempted). */
void ss_error(char *str) {
  printf("*** [Thread %i] %s\n",GET_THREAD_NUM, str);
  exit(4);
}

/* initialize the stack
 *   s     - stack to initialize (counters zeroed, regions emptied)
 *   nelts - capacity in number of Node elements
 * The node array is allocated only on first use (stack_g == NULL), so
 * re-initializing an already-allocated stack reuses its storage. */
void ss_init(StealStack *s, int nelts) {
  int nbytes = nelts * sizeof(Node);

  if (debug & 1)
    printf("Thread %d intializing stealStack %p, sizeof(Node) = %X\n",
           GET_THREAD_NUM, s, (int)(sizeof(Node)));

  // allocate stack in shared addr space with affinity to calling thread
  // and record local addr for efficient access in sequel
  if(s->stack_g==NULL){
    s->stack_g = (SHARED_INDEF Node *) ALLOC (nbytes);
    s->stack = (Node *) s->stack_g;
    if (s->stack == NULL) {
      printf("Request for %d bytes for stealStack on thread %d failed\n",
             nbytes, GET_THREAD_NUM);
      ss_error("ss_init: unable to allocate space for stealstack");
    }
  }
#ifdef TRACE
  /* tracing metadata (session/steal records) lives beside the stack */
  s->md = (MetaData *) ALLOC (sizeof(MetaData));
  if (s->md == NULL)
    ss_error("ss_init: out of memory");
#endif
  INIT_LOCK(s->stackLock);
  if (debug & 1)
    printf("Thread %d init stackLock %p\n", GET_THREAD_NUM,
           (void *) s->stackLock);
  s->stackSize = nelts;
  /* zero all statistics counters */
  s->nNodes = 0;
  s->maxStackDepth = 0;
  s->maxTreeDepth = 0;
  s->nLeaves = 0;
  s->nAcquire = 0;
  s->nRelease = 0;
  s->nSteal = 0;
  s->nFail = 0;
  s->wakeups = 0;
  s->falseWakeups = 0;
  s->nNodes_last = 0;
  ss_mkEmpty(s);
}

/* local push
 * Copies node *c onto the top of the local (exclusive) region.  No lock
 * is taken: per the header comment, only the owning thread touches the
 * local region.  Aborts via ss_error on overflow. */
void ss_push(StealStack *s, Node *c) {
  if (s->top >= s->stackSize)
    ss_error("ss_push: overflow");
  if (debug & 1)
    printf("ss_push: Thread %d, posn %d: node %s [%d]\n",
           GET_THREAD_NUM, s->top,
           rng_showstate(c->state.state, debug_str), c->height);
  memcpy(&(s->stack[s->top]), c, sizeof(Node));
  s->top++;
  s->nNodes++;
  /* track high-water marks for stats */
  s->maxStackDepth = (s->top > s->maxStackDepth ? s->top : s->maxStackDepth);
  s->maxTreeDepth = (s->maxTreeDepth > c->height ? s->maxTreeDepth : c->height);
}

/* local top: get local addr of node at top
 * Returns a pointer into the stack array (valid until the slot is
 * popped/overwritten).  Aborts if the local region is empty. */
Node * ss_top(StealStack *s) {
  Node *r;
  if (s->top <= s->local)
    ss_error("ss_top: empty local stack");
  r = &(s->stack[(s->top) - 1]);
  if (debug & 1)
    printf("ss_top: Thread %d, posn %d: node %s [%d] nchild = %d\n",
           GET_THREAD_NUM, s->top - 1,
           rng_showstate(r->state.state, debug_str), r->height,
           r->numChildren);
  return r;
}

/* local pop
 * Removes the top element of the local region (the slot is not
 * cleared; r is read back only for debug output). */
void ss_pop(StealStack *s) {
  Node *r;
  if (s->top <= s->local)
    ss_error("ss_pop: empty local stack");
  s->top--;
  r = &(s->stack[s->top]);
  if (debug & 1)
    printf("ss_pop: Thread %d, posn %d: node %s [%d] nchild = %d\n",
           GET_THREAD_NUM, s->top,
           rng_showstate(r->state.state, debug_str), r->height,
           r->numChildren);
}

/* local top position: stack index of top element */
int ss_topPosn(StealStack *s)
{
  if (s->top <= s->local)
    ss_error("ss_topPosn: empty local stack");
  return s->top - 1;
}

/* local depth: number of elements in the exclusive (local) region */
int ss_localDepth(StealStack *s) {
  return (s->top - s->local);
}

/* release k values from bottom of local stack
 * Moves the boundary so k elements become visible to thieves; counts a
 * release.  Aborts if fewer than k local elements exist. */
void ss_release(StealStack *s, int k) {
  SET_LOCK(s->stackLock);
  if (s->top - s->local >= k) {
    s->local += k;
    s->workAvail += k;
    s->nRelease++;
  }
  else
    ss_error("ss_release: do not have k vals to release");
  UNSET_LOCK(s->stackLock);
}

/* move k values from top of shared stack into local stack
 * return false if k vals are not avail on shared stack
 * (reclaims previously released work before resorting to stealing) */
int ss_acquire(StealStack *s, int k) {
  int avail;
  SET_LOCK(s->stackLock);
  avail = s->local - s->sharedStart;
  if (avail >= k) {
    s->local -= k;
    s->workAvail -= k;
    s->nAcquire++;
  }
  UNSET_LOCK(s->stackLock);
  return (avail >= k);
}

/* steal k values from shared portion of victim thread's stealStack
 * onto local portion of current thread's stealStack.
 * return false if k vals are not avail in victim thread
 *
 * Only the victim's lock is taken (the thief's stack must be empty, so
 * no thief-side lock is needed); the element copy itself happens after
 * the lock is dropped, on the reserved — never reclaimed — slots. */
int ss_steal(StealStack *s, int victim, int k) {
  int victimLocal, victimShared, victimWorkAvail;
  int ok;

  if (s->sharedStart != s->top)
    ss_error("ss_steal: thief attempts to steal onto non-empty stack");

  if (s->top + k >= s->stackSize)
    ss_error("ss_steal: steal will overflow thief's stack");

  /* lock victim stack and try to reserve k elts */
  if (debug & 32)
    printf("Thread %d wants SS %d\n", GET_THREAD_NUM, victim);

  SET_LOCK(stealStack[victim]->stackLock);

#ifdef _SHMEM
  /* Get remote steal stack (refresh our mirror of the victim's
   * bookkeeping; the trailing pointer fields are excluded) */
#ifdef TRACE
  SMEMCPY(stealStack[victim], stealStack[victim], sizeof(StealStack)-4*sizeof(void*), victim);
#else
  SMEMCPY(stealStack[victim], stealStack[victim], sizeof(StealStack)-3*sizeof(void*), victim);
#endif
#endif

  if (debug & 32)
    printf("Thread %d acquires SS %d\n", GET_THREAD_NUM, victim);

  victimLocal = stealStack[victim]->local;
  victimShared = stealStack[victim]->sharedStart;
  victimWorkAvail = stealStack[victim]->workAvail;

  if (victimLocal - victimShared != victimWorkAvail)
    ss_error("ss_steal: stealStack invariant violated");

  ok = victimWorkAvail >= k;
  if (ok) {
    /* reserve a chunk */
    stealStack[victim]->sharedStart = victimShared + k;
    stealStack[victim]->workAvail = victimWorkAvail - k;

#ifdef _SHMEM
    // FIXME: These transfers ought to be combined.  They can't be
    // though because the data protected by the stacklock is not
    // the only data in the StealStack structure.
    PUT(stealStack[victim]->sharedStart, stealStack[victim]->sharedStart, victim);
    PUT(stealStack[victim]->workAvail, stealStack[victim]->workAvail, victim);
#endif
  }
  UNSET_LOCK(stealStack[victim]->stackLock);

  if (debug & 32)
    printf("Thread %d releases SS %d\n", GET_THREAD_NUM, victim);

  /* if k elts reserved, move them to local portion of our stack */
  if (ok) {
    SHARED_INDEF Node * victimStackBase = stealStack[victim]->stack_g;
    SHARED_INDEF Node * victimSharedStart = victimStackBase + victimShared;

#ifdef _SHMEM
    SMEMCPY(&(s->stack[s->top]), victimSharedStart, k * sizeof(Node), victim);
#else
    SMEMCPY(&(s->stack[s->top]), victimSharedStart, k * sizeof(Node));
#endif

    s->nSteal++;
    if (debug & 4) {
      int i;
      for (i = 0; i < k; i ++) {
        Node * r = &(s->stack[s->top + i]);
        printf("ss_steal: Thread %2d posn %d (steal #%d) receives %s [%d] from thread %d posn %d (%p)\n",
               GET_THREAD_NUM, s->top + i, s->nSteal,
               rng_showstate(r->state.state, debug_str),
               r->height, victim, victimShared + i,
               (void *) victimSharedStart);
      }
    }
    s->top += k;

#ifdef TRACE
    /* update session record of thief */
    s->md->stealRecords[s->entries[SS_WORK]].victimThread = victim;
#endif
  }
  else {
    s->nFail++;
    if (debug & 4) {
      printf("Thread %d failed to steal %d nodes from thread %d, ActAv = %d, sh = %d, loc =%d\n",
             GET_THREAD_NUM, k, victim, victimWorkAvail, victimShared, victimLocal);
    }
  }
  return (ok);
}

/* search other threads for work to steal
 * Round-robin scan starting at the next thread id; returns the first
 * victim advertising >= k elements of shared work, or -1 if none.
 * workAvail is read speculatively (no lock) — see header comment. */
int findwork(int k) {
  int i,v;
  for (i = 1; i < GET_NUM_THREADS; i++) {
    v = (GET_THREAD_NUM + i) % GET_NUM_THREADS;
#ifdef _SHMEM
    GET(stealStack[v]->workAvail, stealStack[v]->workAvail, v);
#endif
    if (stealStack[v]->workAvail >= k)
      return v;
  }
  return -1;
}

/**
 * Tracing functions
 *   Track changes in the search state for offline analysis.
**/ void ss_initState(StealStack *s) { int i; s->timeLast = uts_wctime(); for (i = 0; i < SS_NSTATES; i++) { s->time[i] = 0.0; s->entries[i] = 0; } s->curState = SS_IDLE; if (debug & 8) printf("Thread %d start state %d (t = %f)\n", GET_THREAD_NUM, s->curState, s->timeLast); } void ss_setState(StealStack *s, int state){ double time; if (state < 0 || state >= SS_NSTATES) ss_error("ss_setState: thread state out of range"); if (state == s->curState) return; time = uts_wctime(); s->time[s->curState] += time - s->timeLast; #ifdef TRACE /* close out last session record */ s->md->sessionRecords[s->curState][s->entries[s->curState] - 1].endTime = time; if (s->curState == SS_WORK) { s->md->stealRecords[s->entries[SS_WORK] - 1].nodeCount = s->nNodes - s->md->stealRecords[s->entries[SS_WORK] - 1].nodeCount; } /* initialize new session record */ s->md->sessionRecords[state][s->entries[state]].startTime = time; if (state == SS_WORK) { s->md->stealRecords[s->entries[SS_WORK]].nodeCount = s->nNodes; } #endif s->entries[state]++; s->timeLast = time; s->curState = state; if(debug & 8) printf("Thread %d enter state %d [#%d] (t = %f)\n", GET_THREAD_NUM, state, s->entries[state], time); } #ifdef UTS_STAT /* * Statistics, * : number of nodes per level * : imbalanceness of nodes per level * */ void initHist() { int i; for (i=0; i<MAXHISTSIZE; i++){ hist[i][0]=0; hist[i][1]=0; unbhist[i][1]=1; unbhist[i][2]=0; } } void updateHist(Node* c, double unb) { if (c->height<MAXHISTSIZE){ hist[c->height][1]++; hist[c->height][0]+=c->numChildren; unbhist[c->height][0]+=unb; if (unbhist[c->height][1]>unb) unbhist[c->height][1]=unb; if (unbhist[c->height][2]<unb) unbhist[c->height][2]=unb; } else { hist[MAXHISTSIZE][1]++; hist[MAXHISTSIZE][0]+=c->numChildren; } } void showHist(FILE *fp) { int i; fprintf(fp, "depth\tavgNumChildren\t\tnumChildren\t imb\t maxImb\t minImb\t\n"); for (i=0; i<MAXHISTSIZE; i++){ if ((hist[i][0]!=0)&&(hist[i][1]!=0)) fprintf(fp, "%d\t%f\t%d\t %lf\t%lf\t%lf\n", i, 
(double)hist[i][0]/hist[i][1], hist[i][0], unbhist[i][0]/hist[i][1], unbhist[i][1], unbhist[i][2]); } } double getImb(Node *c) { int i=0; double avg=.0, tmp=.0; double unb=0.0; avg=(double)c->sizeChildren/c->numChildren; for (i=0; i<c->numChildren; i++){ if ((type==BIN)&&(c->pp==NULL)) { if (unbType<2) tmp=min((double)rootSize[i]/avg, avg/(double)rootSize[i]); else tmp=max((double)rootSize[i]/avg, avg/(double)rootSize[i]); if (unbType>0) unb+=tmp*rootUnb[i]; else unb+=tmp*rootUnb[i]*rootSize[i]; } else{ if (unbType<2) tmp=min((double)c->size[i]/avg, avg/(double)c->size[i]); else tmp=max((double)c->size[i]/avg, avg/(double)c->size[i]); if (unbType>0) unb+=tmp*c->unb[i]; else unb+=tmp*c->unb[i]*c->size[i]; } } if (unbType>0){ if (c->numChildren>0) unb=unb/c->numChildren; else unb=1.0; } else { if (c->sizeChildren>1) unb=unb/c->sizeChildren; else unb=1.0; } if ((debug & 1) && unb>1) printf("unb>1%lf\t%d\n", unb, c->numChildren); return unb; } void getImb_Tseng(Node *c) { double t_max, t_avg, t_devmaxavg, t_normdevmaxavg; if (c->numChildren==0) { t_avg =0; t_max =0; } else { t_max = (double)c->maxSizeChildren/(c->sizeChildren-1); t_avg = (double)1/c->numChildren; } t_devmaxavg = t_max-t_avg; if (debug & 1) printf("max\t%lf, %lf, %d, %d, %d\n", t_max, t_avg, c->maxSizeChildren, c->sizeChildren, c->numChildren); if (1-t_avg==0) t_normdevmaxavg = 1; else t_normdevmaxavg = (t_max-t_avg)/(1-t_avg); imb_max += t_max; imb_avg += t_avg; imb_devmaxavg += t_devmaxavg; imb_normdevmaxavg +=t_normdevmaxavg; } void updateParStat(Node *c) { double unb; totalNodes++; if (maxHeight<c->height) maxHeight=c->height; unb=getImb(c); maxImb=max(unb, maxImb); minImb=min(unb, minImb); updateHist(c, unb); getImb_Tseng(c); if (c->pp!=NULL){ if ((c->type==BIN)&&(c->pp->pp==NULL)){ rootSize[c->pp->ind]=c->sizeChildren; rootUnb[c->pp->ind]=unb; } else{ c->pp->size[c->pp->ind]=c->sizeChildren; c->pp->unb[c->pp->ind]=unb; } /* update statistics per node*/ c->pp->ind++; 
c->pp->sizeChildren+=c->sizeChildren; if (c->pp->maxSizeChildren<c->sizeChildren) c->pp->maxSizeChildren=c->sizeChildren; } else treeImb = unb; } #endif /* * Tree Implementation * */ void initNode(Node * child) { child->type = -1; child->height = -1; child->numChildren = -1; // not yet determined #ifdef UTS_STAT if (stats){ int i; child->ind = 0; child->sizeChildren = 1; child->maxSizeChildren = 0; child->pp = NULL; for (i = 0; i < MAXNUMCHILDREN; i++){ child->size[i] = 0; child->unb[i] = 0.0; } } #endif } void initRootNode(Node * root, int type) { uts_initRoot(root, type); #ifdef TRACE stealStack[0]->md->stealRecords[0].victimThread = 0; // first session is own "parent session" #endif #ifdef UTS_STAT if (stats){ int i; root->ind = 0; root->sizeChildren = 1; root->maxSizeChildren = 1; root->pp = NULL; if (type != BIN){ for (i=0; i<MAXNUMCHILDREN; i++){ root->size[i] = 0; root->unb[i] =.0; } } else { int rbf = (int) ceil(b_0); rootSize = malloc(rbf*sizeof(int)); rootUnb = malloc(rbf*sizeof(double)); for (i = 0; i < rbf; i++) { rootSize[i] = 0; rootUnb[i] = 0.0; } } } #endif } // forward decl void releaseNodes(StealStack *ss); /* * Generate all children of the parent * * details depend on tree type, node type and shape function * */ void genChildren(Node * parent, Node * child, StealStack * ss) { int parentHeight = parent->height; int numChildren, childType; numChildren = uts_numChildren(parent); childType = uts_childType(parent); // record number of children in parent parent->numChildren = numChildren; if (debug & 2) { printf("Gen: Thread %d, posn %2d: node %s [%d] has %2d children\n", GET_THREAD_NUM, ss_topPosn(ss), rng_showstate(parent->state.state, debug_str), parentHeight, numChildren); } // construct children and push onto stack if (numChildren > 0) { int i, j; child->type = childType; child->height = parentHeight + 1; #ifdef UTS_STAT if (stats) child->pp = parent; // pointer to parent #endif for (i = 0; i < numChildren; i++) { for (j = 0; j < 
computeGranularity; j++) { // TBD: add parent height to spawn // computeGranularity controls number of rng_spawn calls per node rng_spawn(parent->state.state, child->state.state, i); } ss_push(ss, child); releaseNodes(ss); } } else { ss->nLeaves++; } } /* * Parallel tree traversal * */ // cancellable barrier // initialize lock: single thread under omp, all threads under upc void cb_init(){ INIT_SINGLE_LOCK(cb_lock); if (debug & 4) printf("Thread %d, cb lock at %p\n", GET_THREAD_NUM, (void *) cb_lock); // fixme: no need for all upc threads to repeat this SET_LOCK(cb_lock); cb_count = 0; cb_cancel = 0; cb_done = 0; UNSET_LOCK(cb_lock); } // delay this thread until all threads arrive at barrier // or until barrier is cancelled int cbarrier_wait() { int l_count, l_done, l_cancel; int pe = GET_THREAD_NUM; SET_LOCK(cb_lock); cb_count++; // fprintf(stderr, "PE %d: cb_count=%d, # PEs=%d\n", shmem_my_pe(), cb_count, shmem_n_pes()); #ifdef _SHMEM PUT_ALL(cb_count, cb_count); #endif if (cb_count == GET_NUM_THREADS) { cb_done = 1; #ifdef _SHMEM PUT_ALL(cb_done, cb_done); #endif } l_count = cb_count; l_done = cb_done; if (stealStack[pe]->nNodes_last == stealStack[pe]->nNodes) { ++stealStack[pe]->falseWakeups; } stealStack[GET_THREAD_NUM]->nNodes_last = stealStack[pe]->nNodes; UNSET_LOCK(cb_lock); if (debug & 16) printf("Thread %d enter spin-wait, count = %d, done = %d\n", GET_THREAD_NUM, l_count, l_done); // spin do { #ifdef __BERKELEY_UPC__ bupc_poll(); #endif l_count = cb_count; l_cancel = cb_cancel; l_done = cb_done; } while (!l_cancel && !l_done); // fprintf(stderr, "PE %d: exiting spin loop because l_cancel=%d l_done=%d\n", shmem_my_pe(), l_cancel, l_done); if (debug & 16) printf("Thread %d exit spin-wait, count = %d, done = %d, cancel = %d\n", GET_THREAD_NUM, l_count, l_done, l_cancel); SET_LOCK(cb_lock); cb_count--; l_count = cb_count; #ifdef _SHMEM PUT_ALL(cb_count, cb_count); #endif cb_cancel = 0; l_done = cb_done; ++stealStack[GET_THREAD_NUM]->wakeups; 
UNSET_LOCK(cb_lock); if (debug & 16) printf("Thread %d exit idle state, count = %d, done = %d\n", GET_THREAD_NUM, l_count, cb_done); return cb_done; } // causes one or more threads waiting at barrier, if any, // to be released void cbarrier_cancel() { #ifdef _SHMEM cb_cancel = 1; PUT_ALL(cb_cancel, cb_cancel); #elif defined (__BERKELEY_UPC__) bupc_waitsync(cb_handle); cb_handle = bupc_memput_async((shared void*)&cb_cancel, (const void*)&local_cb_cancel, sizeof(int)); #else cb_cancel = 1; #endif /* _SHMEM */ } void releaseNodes(StealStack *ss){ if (doSteal) { if (ss_localDepth(ss) > 2 * chunkSize) { // Attribute this time to runtime overhead ss_setState(ss, SS_OVH); ss_release(ss, chunkSize); // This has significant overhead on clusters! if (ss->nNodes % cbint == 0) { ss_setState(ss, SS_CBOVH); cbarrier_cancel(); } #ifdef __BERKELEY_UPC__ if (ss->nNodes % pollint == 0) { ss_setState(ss, SS_OVH); bupc_poll(); } #endif ss_setState(ss, SS_WORK); } } } /* * parallel search of UTS trees using work stealing * * Note: tree size is measured by the number of * push operations */ void parTreeSearch(StealStack *ss) { int done = 0; Node * parent; Node child; /* template for children */ initNode(&child); /* tree search */ while (done == 0) { // fprintf(stderr, "PE %d: AAAAA\n", shmem_my_pe()); /* local work */ while (ss_localDepth(ss) > 0) { ss_setState(ss, SS_WORK); /* examine node at stack top */ parent = ss_top(ss); if (parent->numChildren < 0){ // fprintf(stderr, "PE %d: processed a node\n", shmem_my_pe()); // first time visited, construct children and place on stack genChildren(parent,&child,ss); } else { // second time visit, process accumulated statistics and pop #ifdef UTS_STAT if (stats) updateParStat(parent); #endif ss_pop(ss); } // release some nodes for stealing, if enough are available // and wake up quiescent threads releaseNodes(ss); } // fprintf(stderr, "PE %d: BBBBB\n", shmem_my_pe()); /* local work exhausted on this stack - resume tree search if able * to 
re-acquire work from shared portion of this thread's stack */ if (ss_acquire(ss, chunkSize)) continue; // fprintf(stderr, "PE %d: CCCCC\n", shmem_my_pe()); /* no work left in this thread's stack */ /* try to steal work from another thread's stack */ if (doSteal) { int goodSteal = 0; int victimId; // fprintf(stderr, "PE %d: DDDDD\n", shmem_my_pe()); ss_setState(ss, SS_SEARCH); victimId = findwork(chunkSize); while (victimId != -1 && !goodSteal) { // some work detected, try to steal it goodSteal = ss_steal(ss, victimId, chunkSize); if (!goodSteal) victimId = findwork(chunkSize); } // fprintf(stderr, "PE %d: EEEEE %d\n", shmem_my_pe(), goodSteal); if (goodSteal) continue; } /* unable to steal work from shared portion of other stacks - * enter quiescent state waiting for termination (done != 0) * or cancellation because some thread has made work available * (done == 0). */ ss_setState(ss, SS_IDLE); // fprintf(stderr, "PE %d: FFFFF\n", shmem_my_pe()); done = cbarrier_wait(); // fprintf(stderr, "PE %d: GGGGG\n", shmem_my_pe()); } /* tree search complete ! 
*/ } #ifdef __PTHREADS__ /* Pthreads ParTreeSearch Arguments */ struct pthread_args { StealStack *ss; int id; }; /* Pthreads ParTreeSearch Wrapper */ void * pthread_spawn_search(void *arg) { pthread_setspecific(pthread_thread_num, &((struct pthread_args*)arg)->id); parTreeSearch(((struct pthread_args*)arg)->ss); return NULL; } #endif /* __PTHREADS__ */ #ifdef TRACE // print session records for each thread (used when trace is enabled) void printSessionRecords() { int i, j, k; double offset; for (i = 0; i < GET_NUM_THREADS; i++) { offset = startTime[i] - startTime[0]; for (j = 0; j < SS_NSTATES; j++) for (k = 0; k < stealStack[i]->entries[j]; k++) { printf ("%d %d %f %f", i, j, stealStack[i]->md->sessionRecords[j][k].startTime - offset, stealStack[i]->md->sessionRecords[j][k].endTime - offset); if (j == SS_WORK) printf (" %d %ld", stealStack[i]->md->stealRecords[k].victimThread, stealStack[i]->md->stealRecords[k].nodeCount); printf ("\n"); } } } #endif // display search statistics void showStats(double elapsedSecs) { int i; int tnodes = 0, tleaves = 0, trel = 0, tacq = 0, tsteal = 0, tfail= 0; int mdepth = 0, mheight = 0; double twork = 0.0, tsearch = 0.0, tidle = 0.0, tovh = 0.0, tcbovh = 0.0; #ifdef _SHMEM { int pe; /* Assemble all of the stealstacks so we can gather some stats. 
*/ for (i = 1; i < GET_NUM_THREADS; i++) { pe = (GET_THREAD_NUM + i) % GET_NUM_THREADS; /* Collect up all of the StealStacks */ SMEMCPY(stealStack[pe], stealStack[pe], sizeof(StealStack), pe); #ifdef TRACE /* Get the MetaData as well */ SMEMCPY(stealStack[pe]->md, stealStack[pe]->md, sizeof(StealStack), pe); #endif } } #endif // combine measurements from all threads for (i = 0; i < GET_NUM_THREADS; i++) { tnodes += stealStack[i]->nNodes; tleaves += stealStack[i]->nLeaves; trel += stealStack[i]->nRelease; tacq += stealStack[i]->nAcquire; tsteal += stealStack[i]->nSteal; tfail += stealStack[i]->nFail; twork += stealStack[i]->time[SS_WORK]; tsearch += stealStack[i]->time[SS_SEARCH]; tidle += stealStack[i]->time[SS_IDLE]; tovh += stealStack[i]->time[SS_OVH]; tcbovh += stealStack[i]->time[SS_CBOVH]; mdepth = (mdepth > stealStack[i]->maxStackDepth ? mdepth : stealStack[i]->maxStackDepth); mheight = (mheight > stealStack[i]->maxTreeDepth ? mheight : stealStack[i]->maxTreeDepth); } if (trel != tacq + tsteal) { printf("*** error! 
total released != total acquired + total stolen\n"); } uts_showStats(GET_NUM_THREADS, chunkSize, elapsedSecs, tnodes, tleaves, mheight); if (verbose > 1) { if (doSteal) { printf("Total chunks released = %d, of which %d reacquired and %d stolen\n", trel, tacq, tsteal); printf("Failed steal operations = %d, ", tfail); } printf("Max stealStack size = %d\n", mdepth); printf("Avg time per thread: Work = %.6f, Search = %.6f, Idle = %.6f\n", (twork / GET_NUM_THREADS), (tsearch / GET_NUM_THREADS), (tidle / GET_NUM_THREADS)); printf(" Overhead = %6f, CB_Overhead = %6f\n\n", (tovh / GET_NUM_THREADS), (tcbovh/GET_NUM_THREADS)); } // per thread execution info if (verbose > 2) { for (i = 0; i < GET_NUM_THREADS; i++) { printf("** Thread %d\n", i); printf(" # nodes explored = %d\n", stealStack[i]->nNodes); printf(" # chunks released = %d\n", stealStack[i]->nRelease); printf(" # chunks reacquired = %d\n", stealStack[i]->nAcquire); printf(" # chunks stolen = %d\n", stealStack[i]->nSteal); printf(" # failed steals = %d\n", stealStack[i]->nFail); printf(" maximum stack depth = %d\n", stealStack[i]->maxStackDepth); printf(" work time = %.6f secs (%d sessions)\n", stealStack[i]->time[SS_WORK], stealStack[i]->entries[SS_WORK]); printf(" overhead time = %.6f secs (%d sessions)\n", stealStack[i]->time[SS_OVH], stealStack[i]->entries[SS_OVH]); printf(" search time = %.6f secs (%d sessions)\n", stealStack[i]->time[SS_SEARCH], stealStack[i]->entries[SS_SEARCH]); printf(" idle time = %.6f secs (%d sessions)\n", stealStack[i]->time[SS_IDLE], stealStack[i]->entries[SS_IDLE]); printf(" wakeups = %d, false wakeups = %d (%.2f%%)", stealStack[i]->wakeups, stealStack[i]->falseWakeups, (stealStack[i]->wakeups == 0) ? 
0.00 : ((((double)stealStack[i]->falseWakeups)/stealStack[i]->wakeups)*100.0)); printf("\n"); } } #ifdef TRACE printSessionRecords(); #endif // tree statistics output to stat.txt, if requested #ifdef UTS_STAT if (stats) { FILE *fp; char * tmpstr; char strBuf[5000]; int ind = 0; fp = fopen("stat.txt", "a+w"); fprintf(fp, "\n------------------------------------------------------------------------------------------------------\n"); ind = uts_paramsToStr(strBuf, ind); ind = impl_paramsToStr(strBuf, ind); //showParametersStr(strBuf); fprintf(fp, "%s\n", strBuf); fprintf(fp, "\nTotal nodes = %d\n", totalNodes); fprintf(fp, "Max depth = %d\n\n", maxHeight); fprintf(fp, "Tseng ImbMeasure(overall)\n max:\t\t%lf \n avg:\t\t%lf \n devMaxAvg:\t %lf\n normDevMaxAvg: %lf\t\t\n\n", imb_max/totalNodes, imb_avg/totalNodes, imb_devmaxavg/totalNodes, imb_normdevmaxavg/totalNodes); switch (unbType){ case 0: tmpstr = "(min imb weighted by size)"; break; case 1: tmpstr = "(min imb not weighted by size)"; break; case 2: tmpstr = "(max imb not weighted by size)"; break; default: tmpstr = "(?unknown measure)"; break; } fprintf(fp, "ImbMeasure:\t%s\n Overall:\t %lf\n Max:\t\t%lf\n Min:\t\t%lf\n\n", tmpstr, treeImb, minImb, maxImb); showHist(fp); fprintf(fp, "\n------------------------------------------------------------------------------------------------------\n\n\n"); fclose(fp); } #endif } /* PThreads main() function: * Pthreads is quite a bit different because of how global data has to be stored * using setspecific() and getspecific(). So, many functions are not safe to call * in the single-threaded context. 
*/
#ifdef __PTHREADS__
/* Pthreads entry point: parse params, allocate one StealStack per
 * thread, seed thread 0 with the root node, spawn/join the workers
 * around the timed region, then report statistics.
 * NOTE(review): the spawn-failure message passes err (the
 * pthread_create error code), not the thread index — confirm intent.
 * NOTE(review): this path uses uts_initRoot (not initRootNode), so
 * UTS_STAT root bookkeeping is presumably not set up here — verify. */
int pthread_main(int argc, char *argv[]) {
  Node root;
  double t1, t2;
  int i, err;
  void *rval;
  struct pthread_args *args;
  pthread_t *thread_ids;

  uts_parseParams(argc, argv);
  uts_printParams();
  cb_init();

  /* allocate stealstacks */
  for (i = 0; i < GET_NUM_THREADS; i++) {
    stealStack[i] = ALLOC (sizeof(StealStack));
    ss_init(stealStack[i], MAXSTACKDEPTH);
  }

  /* initialize root node and push on thread 0 stack */
  uts_initRoot(&root, type);
  ss_push(stealStack[0], &root);

  thread_ids = malloc(sizeof(pthread_t)*GET_NUM_THREADS);
  args = malloc(sizeof(struct pthread_args)*GET_NUM_THREADS);
  /* per-thread id slot; read back by GET_THREAD_NUM via getspecific */
  pthread_key_create(&pthread_thread_num, NULL);

  /* start timing */
  t1 = uts_wctime();

  for (i = 0; i < GET_NUM_THREADS; i++) {
    ss_initState(stealStack[i]);
    args[i].ss = stealStack[i];
    args[i].id = i;
    err = pthread_create(&thread_ids[i], NULL, pthread_spawn_search,
                         (void*)&args[i]);
    if (err != 0) {
      printf("FATAL: Error spawning thread %d\n", err);
      impl_abort(1);
    }
  }

  /* wait for all workers to finish the tree search */
  for (i = 0; i < GET_NUM_THREADS; i++) {
    pthread_join(thread_ids[i], &rval);
  }

  /* stop timing */
  t2 = uts_wctime();

  showStats(t2-t1);
  return 0;
}
#endif /* __PTHREADS__ */

/* Main() function for: Sequential, OpenMP, UPC, and Shmem
 *
 * Notes on execution model:
 *   - under openMP, global vars are all shared
 *   - under UPC, global vars are private unless explicitly shared
 *   - UPC is SPMD starting with main, OpenMP goes SPMD after
 *     parsing parameters
 */
int main(int argc, char *argv[]) {
  Node root;

#ifdef __PTHREADS__
  /* Pthreads has its own driver; nothing below runs in that build. */
  return pthread_main(argc, argv);
#endif

#ifdef _SHMEM
  shmem_init();
  // start_pes(0);
#endif

  /* determine benchmark parameters (all PEs) */
  uts_parseParams(argc, argv);

#ifdef UTS_STAT
  if (stats)
    initHist();
#endif

  /* cancellable barrier initialization (single threaded under OMP) */
  cb_init();

/********** SPMD Parallel Region **********/
#pragma omp parallel
  {
    double t1, t2, et;
    StealStack * ss;

    /* show parameter settings */
    if (GET_THREAD_NUM == 0) {
      uts_printParams();
    }

    /* initialize stealstacks */
#ifdef _SHMEM
    {
      /* Shared allocation is a collective operation in Shmem. These
       * need to be done all at once and in the same order on each PE.
       *
       * Note: Only our own stealstack will contain valid data as UTS runs.
       * For stats, we'll need to gather everyone else's stealstacks
       */
      int i;
      stealStack[0] = (SHARED StealStack *) ALLOC (sizeof(StealStack));
      memset(stealStack[0],0,sizeof(StealStack));
      ss = (StealStack *) stealStack[0];
      ss_init(ss, MAXSTACKDEPTH);
      for (i = 1; i < GET_NUM_THREADS; i++) {
        stealStack[i] = (SHARED StealStack *) ALLOC (sizeof(StealStack));
        ss = (StealStack *) stealStack[i];
        /* remote stacks share PE 0's node array; only bookkeeping differs */
        ss->stack_g=ss->stack=stealStack[0]->stack;
        ss_init(ss, MAXSTACKDEPTH);
      }
      ss = stealStack[GET_THREAD_NUM];
    }
#else
    stealStack[GET_THREAD_NUM] = (SHARED StealStack *) ALLOC (sizeof(StealStack));
    ss = (StealStack *) stealStack[GET_THREAD_NUM];
    ss_init(ss, MAXSTACKDEPTH);
#endif /* _SHMEM */

    /* initialize root node and push on thread 0 stack */
    if (GET_THREAD_NUM == 0) {
      initRootNode(&root, type);
      ss_push(ss, &root);
    }

    // line up for the start
#pragma omp barrier
    BARRIER

    /* time parallel search */
    ss_initState(ss);
    t1 = uts_wctime();
    parTreeSearch(ss);
    t2 = uts_wctime();
    et = t2 - t1;

#ifdef TRACE
    startTime[GET_THREAD_NUM] = t1;
    /* close out the final idle session so trace records are complete */
    ss->md->sessionRecords[SS_IDLE][ss->entries[SS_IDLE] - 1].endTime = t2;
#endif

#pragma omp barrier
    BARRIER

    /* display results */
    if (GET_THREAD_NUM == 0) {
      showStats(et);
    }
  }
/********** End Parallel Region **********/

#ifdef _SHMEM
  shmem_finalize();
#endif

  return 0;
}
cloudsc_driver.c
/*
 * (C) Copyright 1988- ECMWF.
 *
 * This software is licensed under the terms of the Apache Licence Version 2.0
 * which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
 * In applying this licence, ECMWF does not waive the privileges and immunities
 * granted to it by virtue of its status as an intergovernmental organisation
 * nor does it submit to any jurisdiction.
 */
#include "cloudsc_driver.h"

#include <omp.h>
#include "mycpu.h"

#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))

/*
 * Driver for the CLOUDSC micro-physics kernel.
 *
 * Allocates all state/flux arrays in NPROMA-blocked layout, loads the
 * reference input state, runs cloudsc_c over all blocks in an OpenMP
 * parallel region (timing each thread), prints per-thread and total
 * performance figures, validates the results, and frees everything.
 *
 * numthreads : number of OpenMP threads to use
 * numcols    : total number of grid-point columns (NGPTOT)
 * nproma     : block length (columns per block)
 */
void cloudsc_driver(int numthreads, int numcols, int nproma) {

  /* Cumulative, temporary and local tendencies (t, q, cloud fraction a,
   * and the nclv cloud variables).  Only these components are used by
   * cloudsc_c; u/v/o3 tendencies are not part of this kernel. */
  double *tend_tmp_t;
  double *tend_tmp_q;
  double *tend_tmp_a;
  double *tend_tmp_cld;

  double *tend_loc_t;
  double *tend_loc_q;
  double *tend_loc_a;
  double *tend_loc_cld;

  double *tend_cml_t;
  double *tend_cml_q;
  double *tend_cml_a;
  double *tend_cml_cld;

  double ptsphy;            //! Physics timestep

  double *plcrit_aer;
  double *picrit_aer;
  double *pre_ice;
  double *pccn;       //! liquid cloud condensation nuclei
  double *pnice;      //! ice number concentration (cf. CCN)

  double *pt;         //! T at start of callpar
  double *pq;         //! Q at start of callpar
  double *pvfa;       //! CC from VDF scheme
  double *pvfl;       //! Liq from VDF scheme
  double *pvfi;       //! Ice from VDF scheme
  double *pdyna;      //! CC from Dynamics
  double *pdynl;      //! Liq from Dynamics
  double *pdyni;      //! Liq from Dynamics
  double *phrsw;      //! Short-wave heating rate
  double *phrlw;      //! Long-wave heating rate
  double *pvervel;    //! Vertical velocity
  double *pap;        //! Pressure on full levels
  double *paph;       //! Pressure on half levels
  double *plsm;       //! Land fraction (0-1)
  int *ktype;         //! Convection type 0,1,2
  double *plu;        //! Conv. condensate
  double *plude;      //! Conv. detrained water
  double *psnde;      //! Conv. detrained snow
  double *pmfu;       //! Conv. mass flux up
  double *pmfd;       //! Conv. mass flux down
  double *pa;         //! Original Cloud fraction (t)
  double *pclv;
  double *psupsat;

  double *pcovptot;   //! Precip fraction
  double *prainfrac_toprfz;

  double *pfsqlf;     //! Flux of liquid
  double *pfsqif;     //! Flux of ice
  double *pfcqlng;    //! -ve corr for liq
  double *pfcqnng;    //! -ve corr for ice
  double *pfsqrf;     //! Flux diagnostics
  double *pfsqsf;     //! for DDH, generic
  double *pfcqrng;    //! rain
  double *pfcqsng;    //! snow
  double *pfsqltur;   //! liquid flux due to VDF
  double *pfsqitur;   //! ice flux due to VDF
  double *pfplsl;     //! liq+rain sedim flux
  double *pfplsn;     //! ice+snow sedim flux
  double *pfhpsl;     //! Enthalpy flux for liq
  double *pfhpsn;     //! Enthalpy flux for ice

  /* Define or query data dimensions from input file */
  int klon, nlev;
  /* ceiling(numcols / nproma): one extra, partially-filled block if needed */
  int nblocks = (numcols / nproma) + min(numcols % nproma, 1);

  /* per-thread stats: [0]=elapsed time, [1]=core id, [2]=#calls, [3]=#columns */
  double zinfo[4][numthreads];
  const double zhpm = 12482329.0;  // IBM P7 HPM flop count for 100 points at L137

  /* global microphysics indices (1-based, Fortran convention) */
  nclv = 5;      // number of microphysics variables
  ncldql = 1;    // liquid cloud water
  ncldqi = 2;    // ice cloud water
  ncldqr = 3;    // rain water
  ncldqs = 4;    // snow
  ncldqv = 5;    // vapour

  yrecldp = malloc(sizeof(struct TECLDP));

  query_state(&klon, &nlev);

  /* All 3-D fields are (nproma, nlev, nblocks); half-level fields use nlev+1;
   * surface fields are (nproma, nblocks); cloud-variable fields add nclv. */
  tend_loc_t   = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  tend_loc_q   = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  tend_loc_a   = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  tend_loc_cld = (double*) malloc( sizeof(double) * nblocks*nlev*nproma*nclv );
  tend_cml_t   = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  tend_cml_q   = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  tend_cml_a   = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  tend_cml_cld = (double*) malloc( sizeof(double) * nblocks*nlev*nproma*nclv );
  tend_tmp_t   = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  tend_tmp_q   = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  tend_tmp_a   = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  tend_tmp_cld = (double*) malloc( sizeof(double) * nblocks*nlev*nproma*nclv );
  plcrit_aer = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  picrit_aer = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  pre_ice    = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  pccn       = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  pnice      = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  pt         = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  pq         = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  pvfa       = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  pvfl       = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  pvfi       = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  pdyna      = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  pdynl      = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  pdyni      = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  phrsw      = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  phrlw      = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  pvervel    = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  pap        = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  paph       = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma );
  plsm       = (double*) malloc( sizeof(double) * nblocks*nproma );
  ktype      = (int*) malloc( sizeof(int) * nblocks*nproma );
  plu        = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  plude      = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  psnde      = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  pmfu       = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  pmfd       = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  pa         = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  pclv       = (double*) malloc( sizeof(double) * nblocks*nlev*nproma*nclv );
  psupsat    = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  pcovptot   = (double*) malloc( sizeof(double) * nblocks*nlev*nproma );
  prainfrac_toprfz = (double*) malloc( sizeof(double) * nblocks*nproma );
  pfsqlf   = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma );
  pfsqif   = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma );
  pfcqnng  = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma );
  pfcqlng  = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma );
  pfsqrf   = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma );
  pfsqsf   = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma );
  pfcqrng  = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma );
  pfcqsng  = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma );
  pfsqltur = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma );
  pfsqitur = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma );
  pfplsl   = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma );
  pfplsn   = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma );
  pfhpsl   = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma );
  pfhpsn   = (double*) malloc( sizeof(double) * nblocks*(nlev+1)*nproma );

  /* read the serialized reference input state into the blocked arrays */
  load_state(klon, nlev, nclv, numcols, nproma, &ptsphy, plcrit_aer, picrit_aer,
             pre_ice, pccn, pnice, pt, pq,
             tend_cml_t, tend_cml_q, tend_cml_a, tend_cml_cld,
             tend_tmp_t, tend_tmp_q, tend_tmp_a, tend_tmp_cld,
             pvfa, pvfl, pvfi, pdyna, pdynl, pdyni,
             phrsw, phrlw, pvervel, pap, paph, plsm, ktype, plu, plude, psnde,
             pmfu, pmfd, pa, pclv, psupsat);

  double t1 = omp_get_wtime();

#pragma omp parallel num_threads(numthreads) default(shared)
  {
    int b, bsize, icalls=0, igpc=0;
    int coreid = mycpu();                 /* CPU core this thread runs on */
    int tid = omp_get_thread_num();
    double start = omp_get_wtime();

    /* distribute blocks over threads; nowait so each thread stops its own
     * clock as soon as its share of blocks is done */
#pragma omp for schedule(runtime) nowait
    for (b = 0; b < nblocks; b++) {
      const int idx   = b*nlev*nproma;        /* offset into full-level fields */
      const int idxp1 = b*(nlev+1)*nproma;    /* offset into half-level fields */
      const int idx1d = b*nproma;             /* offset into surface fields */
      const int idx3d = b*nclv*nlev*nproma;   /* offset into cloud-var fields */
      /* last block may be partially filled */
      bsize = min(nproma, numcols - b*nproma);

      /* outputs must start zeroed for each block */
      for (int i = 0; i < nlev*nproma; i++) {
        pcovptot[idx+i] = 0.0;
      }
      for (int i = 0; i < nclv*nlev*nproma; i++) {
        tend_loc_cld[idx3d+i] = 0.0;
      }

      cloudsc_c(1, bsize, nproma, nlev, ptsphy,
                &pt[idx], &pq[idx],
                &tend_cml_t[idx], &tend_cml_q[idx], &tend_cml_a[idx], &tend_cml_cld[idx3d],
                &tend_tmp_t[idx], &tend_tmp_q[idx], &tend_tmp_a[idx], &tend_tmp_cld[idx3d],
                &tend_loc_t[idx], &tend_loc_q[idx], &tend_loc_a[idx], &tend_loc_cld[idx3d],
                &pvfa[idx], &pvfl[idx], &pvfi[idx],
                &pdyna[idx], &pdynl[idx], &pdyni[idx],
                &phrsw[idx], &phrlw[idx], &pvervel[idx],
                &pap[idx], &paph[idxp1], &plsm[idx1d], &ktype[idx1d],
                &plu[idx], &plude[idx], &psnde[idx], &pmfu[idx], &pmfd[idx],
                &pa[idx], &pclv[idx3d], &psupsat[idx],
                &plcrit_aer[idx], &picrit_aer[idx], &pre_ice[idx], &pccn[idx], &pnice[idx],
                &pcovptot[idx], &prainfrac_toprfz[idx1d],
                &pfsqlf[idxp1], &pfsqif[idxp1], &pfcqnng[idxp1], &pfcqlng[idxp1],
                &pfsqrf[idxp1], &pfsqsf[idxp1], &pfcqrng[idxp1], &pfcqsng[idxp1],
                &pfsqltur[idxp1], &pfsqitur[idxp1],
                &pfplsl[idxp1], &pfplsn[idxp1], &pfhpsl[idxp1], &pfhpsn[idxp1]);

      icalls += 1;
      igpc += bsize;
    }

    double end = omp_get_wtime();

    /* record this thread's statistics for the report below */
    zinfo[0][tid] = end - start;
    zinfo[1][tid] = (double) coreid;
    zinfo[2][tid] = (double) icalls;
    zinfo[3][tid] = (double) igpc;
  }

  double t2 = omp_get_wtime();

  printf(" NUMOMP=%d, NGPTOT=%d, NPROMA=%d, NGPBLKS=%d\n", numthreads, numcols, nproma, nblocks);
  printf(" %+10s%+10s%+10s%+10s%+10s %+4s : %+10s%+10s\n",
         "NUMOMP", "NGPTOT", "#GP-cols", "#BLKS", "NPROMA", "tid#", "Time(msec)", "MFlops/s");

  double zfrac, zmflops;
  for (int t = 0; t < numthreads; t++) {
    const double tloc = zinfo[0][t];
    const int coreid = (int) zinfo[1][t];
    const int icalls = (int) zinfo[2][t];
    const int igpc = (int) zinfo[3][t];
    /* scale the nominal flop count by this thread's share of the columns */
    zfrac = (double)igpc / (double)numcols;
    if (tloc > 0.0) {
      zmflops = 1.0e-06 * zfrac * zhpm * ((double)numcols / 100.) / tloc;
    } else {
      zmflops = 0.;
    }
    /* Fix: the core id was gathered into zinfo[1] but never printed -- the
     * label "@ core#" used to appear with no number attached. */
    printf(" %10d%10d%10d%10d%10d %4d : %10d%10d @ core#%d\n",
           numthreads, numcols, igpc, icalls, nproma, t,
           (int)(tloc * 1000.), (int)zmflops, coreid);
  }

  double tdiff = t2 - t1;
  zfrac = 1.0;
  if (tdiff > 0.0) {
    zmflops = 1.0e-06 * zfrac * zhpm * ((double)numcols / 100.) / tdiff;
  } else {
    zmflops = 0.0;
  }
  printf(" %10d%10d%10d%10d%10d %4d : %10d%10d TOTAL\n",
         numthreads, numcols, numcols, nblocks, nproma, -1,
         (int)(tdiff * 1000.), (int)zmflops);

  /* compare kernel outputs against the serialized reference results */
  cloudsc_validate(klon, nlev, nclv, numcols, nproma,
                   plude, pcovptot, prainfrac_toprfz, pfsqlf, pfsqif,
                   pfcqlng, pfcqnng, pfsqrf, pfsqsf, pfcqrng, pfcqsng,
                   pfsqltur, pfsqitur, pfplsl, pfplsn, pfhpsl, pfhpsn,
                   tend_loc_a, tend_loc_q, tend_loc_t, tend_loc_cld);

  free(plcrit_aer); // ALLOCATE(PLCRIT_AER(KLON,KLEV))
  free(picrit_aer); // ALLOCATE(PICRIT_AER(KLON,KLEV))
  free(pre_ice);    // ALLOCATE(PRE_ICE(KLON,KLEV))
  free(pccn);
  free(pnice);
  free(pt);
  free(pq);
  free(pvfa);
  free(pvfl);
  free(pvfi);
  free(pdyna);
  free(pdynl);
  free(pdyni);
  free(phrsw);
  free(phrlw);
  free(pvervel);
  free(pap);
  free(paph);       // ALLOCATE(PAPH(KLON,KLEV+1))
  free(plsm);
  free(ktype);
  free(plu);
  free(plude);
  free(psnde);
  free(pmfu);
  free(pmfd);
  free(pa);
  free(pclv);
  free(psupsat);
  free(pcovptot);
  free(tend_loc_t);
  free(tend_loc_q);
  free(tend_loc_a);
  free(tend_loc_cld);
  free(tend_tmp_t);
  free(tend_tmp_q);
  free(tend_tmp_a);
  free(tend_tmp_cld);
  free(tend_cml_t);
  free(tend_cml_q);
  free(tend_cml_a);
  free(tend_cml_cld);
  free(prainfrac_toprfz);
  free(pfsqlf);
  free(pfsqif);
  free(pfcqnng);
  free(pfcqlng);
  free(pfsqrf);
  free(pfsqsf);
  free(pfcqrng);
  free(pfcqsng);
  free(pfsqltur);
  free(pfsqitur);
  free(pfplsl);
  free(pfplsn);
  free(pfhpsl);
  free(pfhpsn);
  free(yrecldp);
}
t_cholmod_gpu.c
/* ========================================================================== */ /* === GPU/t_cholmod_gpu ==================================================== */ /* ========================================================================== */ /* ----------------------------------------------------------------------------- * CHOLMOD/GPU Module. Copyright (C) 2005-2012, Timothy A. Davis * http://www.suitesparse.com * -------------------------------------------------------------------------- */ /* GPU BLAS template routine for cholmod_super_numeric. */ /* ========================================================================== */ /* === include files and definitions ======================================== */ /* ========================================================================== */ #ifdef GPU_BLAS #include <string.h> #include "cholmod_template.h" #undef L_ENTRY #ifdef REAL #define L_ENTRY 1 #else #define L_ENTRY 2 #endif /* ========================================================================== */ /* === gpu_clear_memory ===================================================== */ /* ========================================================================== */ /* * Ensure the Lx is zeroed before forming factor. This is a significant cost * in the GPU case - so using this parallel memset code for efficiency. 
*/

/* Zero `size` doubles starting at `buff`, splitting the work into
 * 5*num_threads chunks handed out dynamically to the OpenMP team.
 * The final chunk absorbs the remainder left by integer division. */
void TEMPLATE2 (CHOLMOD (gpu_clear_memory))
(
    double* buff,
    size_t size,
    int num_threads
)
{
    int chunk_multiplier = 5;
    int num_chunks = chunk_multiplier * num_threads;
    size_t chunksize = size / num_chunks;
    size_t i;

#pragma omp parallel for num_threads(num_threads) private(i) schedule(dynamic)
    for(i = 0; i < num_chunks; i++) {
        size_t chunkoffset = i * chunksize;
        if(i == num_chunks - 1) {
            /* last chunk: clear everything that remains (handles remainder) */
            memset(buff + chunkoffset, 0,
                   (size - chunksize*(num_chunks - 1)) * sizeof(double));
        }
        else {
            memset(buff + chunkoffset, 0, chunksize * sizeof(double));
        }
    }
}

/* ========================================================================== */
/* === gpu_init ============================================================= */
/* ========================================================================== */

/*
 * Performs required initialization for GPU computing.
 *
 * Returns 0 if there is an error, so the intended use is
 *
 *     useGPU = CHOLMOD(gpu_init)
 *
 * which would locally turn off gpu processing if the initialization failed.
 */
int TEMPLATE2 (CHOLMOD (gpu_init))
(
    void *Cwork,
    cholmod_factor *L,
    cholmod_common *Common,
    Int nsuper,
    Int n,
    Int nls,
    cholmod_gpu_pointers *gpu_p
)
{
    Int i, k, maxSize ;
    cublasStatus_t cublasError ;      /* NOTE(review): unused here */
    cudaError_t cudaErr ;
    size_t maxBytesSize, HostPinnedSize ;   /* NOTE(review): unused here */

    /* NOTE(review): enabling FP exceptions process-wide looks like a
     * debugging aid — confirm it is intended in production builds */
    feenableexcept (FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW );

    maxSize = L->maxcsize;

    /* #define PAGE_SIZE (4*1024) */
    CHOLMOD_GPU_PRINTF (("gpu_init : %p\n",
                         (void *) ((size_t) Cwork & ~(4*1024-1)))) ;

    /* make sure the assumed buffer sizes are large enough: Ls, Map and
     * RelativeMap must all fit in the single devBuffSize region carved
     * out below */
    if ( (nls+2*n+4)*sizeof(Int) > Common->devBuffSize ) {
        ERROR (CHOLMOD_GPU_PROBLEM,"\n\n"
               "GPU Memory allocation error. Ls, Map and RelativeMap exceed\n"
               "devBuffSize. It is not clear if this is due to insufficient\n"
               "device or host memory or both. You can try:\n"
               " 1) increasing the amount of GPU memory requested\n"
               " 2) reducing CHOLMOD_NUM_HOST_BUFFERS\n"
               " 3) using a GPU & host with more memory\n"
               "This issue is a known limitation and should be fixed in a \n"
               "future release of CHOLMOD.\n") ;
        return (0) ;
    }

    /* divvy up the memory in dev_mempool: two Lx staging buffers, one C
     * buffer, two assembly buffers, then Ls/Map/RelativeMap packed into
     * the sixth devBuffSize-sized slot */
    gpu_p->d_Lx[0] = Common->dev_mempool;
    gpu_p->d_Lx[1] = Common->dev_mempool + Common->devBuffSize;
    gpu_p->d_C = Common->dev_mempool + 2*Common->devBuffSize;
    gpu_p->d_A[0] = Common->dev_mempool + 3*Common->devBuffSize;
    gpu_p->d_A[1] = Common->dev_mempool + 4*Common->devBuffSize;
    gpu_p->d_Ls = Common->dev_mempool + 5*Common->devBuffSize;
    gpu_p->d_Map = gpu_p->d_Ls + (nls+1)*sizeof(Int) ;
    gpu_p->d_RelativeMap = gpu_p->d_Map + (n+1)*sizeof(Int) ;

    /* Copy all of the Ls and Lpi data to the device.  If any supernodes are
     * to be computed on the device then this will be needed, so might as
     * well do it now.   */
    cudaErr = cudaMemcpy ( gpu_p->d_Ls, L->s, nls*sizeof(Int),
                           cudaMemcpyHostToDevice );
    CHOLMOD_HANDLE_CUDA_ERROR(cudaErr,"cudaMemcpy(d_Ls)");

    /* streams/events are created once and reused across factorizations */
    if (!(Common->gpuStream[0])) {

        /* ------------------------------------------------------------------ */
        /* create each CUDA stream */
        /* ------------------------------------------------------------------ */

        for ( i=0; i<CHOLMOD_HOST_SUPERNODE_BUFFERS; i++ ) {
            cudaErr = cudaStreamCreate ( &(Common->gpuStream[i]) );
            if (cudaErr != cudaSuccess) {
                ERROR (CHOLMOD_GPU_PROBLEM, "CUDA stream") ;
                return (0) ;
            }
        }

        /* ------------------------------------------------------------------ */
        /* create each CUDA event */
        /* ------------------------------------------------------------------ */

        for (i = 0 ; i < 3 ; i++) {
            cudaErr = cudaEventCreateWithFlags
                (&(Common->cublasEventPotrf [i]), cudaEventDisableTiming) ;
            if (cudaErr != cudaSuccess) {
                ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event") ;
                return (0) ;
            }
        }

        for (i = 0 ; i < CHOLMOD_HOST_SUPERNODE_BUFFERS ; i++) {
            cudaErr = cudaEventCreateWithFlags
                (&(Common->updateCBuffersFree[i]), cudaEventDisableTiming) ;
            if (cudaErr != cudaSuccess) {
                ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event") ;
                return (0) ;
            }
        }

        cudaErr = cudaEventCreateWithFlags
            ( &(Common->updateCKernelsComplete), cudaEventDisableTiming );
        if (cudaErr != cudaSuccess) {
            ERROR (CHOLMOD_GPU_PROBLEM, "CUDA updateCKernelsComplete event") ;
            return (0) ;
        }
    }

    /* carve the pinned host pool into CHOLMOD_HOST_SUPERNODE_BUFFERS
     * staging buffers of devBuffSize bytes each */
    gpu_p->h_Lx[0] = (double*)(Common->host_pinned_mempool);
    for ( k=1; k<CHOLMOD_HOST_SUPERNODE_BUFFERS; k++ ) {
        gpu_p->h_Lx[k] = (double*)((char *)(Common->host_pinned_mempool) +
                                   k*Common->devBuffSize);
    }

    return (1);  /* initialization successful, useGPU = 1 */

}

/* ========================================================================== */
/* === gpu_reorder_descendants ============================================== */
/* ========================================================================== */

/* Reorder the descendant supernodes as:
 *  1st - descendant supernodes eligible for processing on the GPU
 *        in increasing (by flops) order
 *  2nd - supernodes whose processing is to remain on the CPU
 *        in any order
 *
 *  All of the GPU-eligible supernodes will be scheduled first.  All
 *  CPU-eligible descendants will overlap with the last (largest)
 *  CHOLMOD_HOST_SUPERNODE_BUFFERS GPU-eligible descendants.
 */

void TEMPLATE2 (CHOLMOD (gpu_reorder_descendants))
(
    cholmod_common *Common,
    Int *Super,             /* first column of each supernode */
    Int *locals,            /* list head index into Head */
    Int *Lpi,
    Int *Lpos,
    Int *Head,              /* linked-list heads of descendants */
    Int *Next,              /* forward links */
    Int *Previous,          /* backward links (built here) */
    Int *ndescendants,      /* out: number of descendants in the list */
    Int *tail,              /* out: last descendant in the list */
    Int *mapCreatedOnGpu,   /* out: cleared to 0 here */
    cholmod_gpu_pointers *gpu_p
)
{
    Int prevd, nextd, firstcpu, d, k, kd1, kd2, ndcol, pdi, pdend, pdi1;
    Int dnext, ndrow2, p;
    Int n_descendant = 0;
    double score;

    /* use h_Lx[0] to buffer the GPU-eligible descendants */
    struct cholmod_descendant_score_t* scores =
        (struct cholmod_descendant_score_t*) gpu_p->h_Lx[0];

    double cpuref = 0.0;    /* NOTE(review): unused here */

    int nreverse = 1;
    int previousd;

    d = Head[*locals];
    prevd = -1;
    firstcpu = -1;
    *mapCreatedOnGpu = 0;

    /* pass 1: score every descendant; GPU-eligible ones get a large bonus
     * (devBuffSize) so the sort puts them after all CPU-only ones */
    while ( d != EMPTY )
    {

        /* Get the parameters for the current descendant supernode */
        kd1 = Super [d] ;       /* d contains cols kd1 to kd2-1 of L */
        kd2 = Super [d+1] ;
        ndcol = kd2 - kd1 ;     /* # of columns in all of d */
        pdi = Lpi [d] ;         /* pointer to first row of d in Ls */
        pdend = Lpi [d+1] ;     /* pointer just past last row of d in Ls */
        p = Lpos [d] ;          /* offset of 1st row of d affecting s */
        pdi1 = pdi + p ;        /* ptr to 1st row of d affecting s in Ls */
        ndrow2 = pdend - pdi1;

        nextd = Next[d];

        /* compute a rough flops 'score' for this descendant supernode */
        score = ndrow2 * ndcol;
        if ( ndrow2*L_ENTRY >= CHOLMOD_ND_ROW_LIMIT &&
             ndcol*L_ENTRY >= CHOLMOD_ND_COL_LIMIT ) {
            score += Common->devBuffSize;
        }

        /* place in sort buffer */
        scores[n_descendant].score = score;
        scores[n_descendant].d = d;
        n_descendant++;

        d = nextd;

    }

    /* Sort the GPU-eligible supernodes */
    qsort ( scores, n_descendant, sizeof(struct cholmod_descendant_score_t),
            (__compar_fn_t) CHOLMOD(score_comp) );

    /* Place sorted data back in descendant supernode linked list*/
    if ( n_descendant > 0 ) {
        Head[*locals] = scores[0].d;
        if ( n_descendant > 1 ) {
            /* relinking iterations are independent, so they can run in
             * parallel (only worthwhile for long lists) */
            #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
                if (n_descendant > 64)
            for ( k=1; k<n_descendant; k++ ) {
                Next[scores[k-1].d] = scores[k].d;
            }
        }
        Next[scores[n_descendant-1].d] = firstcpu;
    }

    /* reverse the first CHOLMOD_HOST_SUPERNODE_BUFFERS to better hide PCIe
     * communications */
    if ( Head[*locals] != EMPTY && Next[Head[*locals]] != EMPTY ) {

        previousd = Head[*locals];
        d = Next[Head[*locals]];

        while ( d!=EMPTY && nreverse < CHOLMOD_HOST_SUPERNODE_BUFFERS ) {

            kd1 = Super [d] ;       /* d contains cols kd1 to kd2-1 of L */
            kd2 = Super [d+1] ;
            ndcol = kd2 - kd1 ;     /* # of columns in all of d */
            pdi = Lpi [d] ;         /* pointer to first row of d in Ls */
            pdend = Lpi [d+1] ;     /* pointer just past last row of d in Ls */
            p = Lpos [d] ;          /* offset of 1st row of d affecting s */
            pdi1 = pdi + p ;        /* ptr to 1st row of d affecting s in Ls */
            ndrow2 = pdend - pdi1;

            nextd = Next[d];

            nreverse++;

            if ( ndrow2*L_ENTRY >= CHOLMOD_ND_ROW_LIMIT &&
                 ndcol*L_ENTRY >= CHOLMOD_ND_COL_LIMIT ) {
                /* place this supernode at the front of the list */
                Next[previousd] = Next[d];
                Next[d] = Head[*locals];
                Head[*locals] = d;
            }
            else {
                previousd = d;
            }
            d = nextd;

        }
    }

    /* create a 'previous' list so we can traverse backwards */
    *ndescendants = 0;
    if ( Head[*locals] != EMPTY ) {
        Previous[Head[*locals]] = EMPTY;
        for (d = Head [*locals] ; d != EMPTY ; d = dnext) {
            (*ndescendants)++;
            dnext = Next[d];
            if ( dnext != EMPTY ) {
                Previous[dnext] = d;
            }
            else {
                *tail = d;
            }
        }
    }

    return;

}

/* ========================================================================== */
/* === gpu_initialize_supernode ============================================= */
/* ========================================================================== */

/* Prepare the device for assembling one supernode: zero the device
 * assembly buffer d_A[0] and build the row Map on the device.
 * (NOTE(review): the original one-line comment here was a truncated copy
 * of the gpu_updateC header.) */

void TEMPLATE2 (CHOLMOD (gpu_initialize_supernode))
(
    cholmod_common *Common,
    Int nscol,
    Int nsrow,
    Int psi,
    cholmod_gpu_pointers *gpu_p
)
{

    cudaError_t cuErr;

    /* initialize the device supernode assembly memory to zero */
    cuErr = cudaMemset ( gpu_p->d_A[0], 0, nscol*nsrow*L_ENTRY*sizeof(double) );
    CHOLMOD_HANDLE_CUDA_ERROR(cuErr,"cudaMemset(d_A)");

    /* Create the Map on the device */
    createMapOnDevice ( (Int *)(gpu_p->d_Map), (Int *)(gpu_p->d_Ls),
                        psi, nsrow );

    return;

}

/* ========================================================================== */
/* === gpu_updateC ========================================================== */
/* ========================================================================== */

/* C = L (k1:n-1, kd1:kd2-1) * L (k1:k2-1, kd1:kd2-1)', except that k1:n-1
 * refers to all of the rows in L, but many of the rows are all zero.
 * Supernode d holds columns kd1 to kd2-1 of L.  Nonzero rows in the range
 * k1:k2-1 are in the list Ls [pdi1 ... pdi2-1], of size ndrow1.  Nonzero rows
 * in the range k2:n-1 are in the list Ls [pdi2 ... pdend], of size ndrow2.
 * Let L1 = L (Ls [pdi1 ... pdi2-1], kd1:kd2-1), and let L2 = L (Ls [pdi2 ...
 * pdend], kd1:kd2-1).  C is ndrow2-by-ndrow1.  Let C1 be the first ndrow1
 * rows of C and let C2 be the last ndrow2-ndrow1 rows of C.  Only the lower
 * triangular part of C1 needs to be computed since C1 is symmetric.
 *
 * UpdateC is completely asynchronous w.r.t. the GPU.  Once the input buffer
 * d_Lx[] has been filled, all of the device operations are issues, and the
 * host can continue with filling the next input buffer / or start processing
 * all of the descendant supernodes which are not eligible for processing on
 * the device (since they are too small - will not fill the device).
*/

int TEMPLATE2 (CHOLMOD (gpu_updateC))
(
    Int ndrow1,         /* C is ndrow2-by-ndrow1 */
    Int ndrow2,
    Int ndrow,          /* leading dimension of Lx */
    Int ndcol,          /* L1 is ndrow1-by-ndcol */
    Int nsrow,
    Int pdx1,           /* L1 starts at Lx + L_ENTRY*pdx1 */
                        /* L2 starts at Lx + L_ENTRY*(pdx1 + ndrow1) */
    Int pdi1,
    double *Lx,
    double *C,
    cholmod_common *Common,
    cholmod_gpu_pointers *gpu_p
)
{
    double *devPtrLx, *devPtrC ;
    double alpha, beta ;
    cublasStatus_t cublasStatus ;
    cudaError_t cudaStat [2] ;
    Int ndrow3 ;
    int icol, irow;
    int iHostBuff, iDevBuff ;

#ifndef NTIMER
    double tstart = 0;
#endif

    if ((ndrow2*L_ENTRY < CHOLMOD_ND_ROW_LIMIT) ||
        (ndcol*L_ENTRY < CHOLMOD_ND_COL_LIMIT))
    {
        /* too small for the CUDA BLAS; use the CPU instead */
        return (0) ;
    }

    ndrow3 = ndrow2 - ndrow1 ;      /* # rows of C2 (the rectangular part) */

#ifndef NTIMER
    Common->syrkStart = SuiteSparse_time ( ) ;
    Common->CHOLMOD_GPU_SYRK_CALLS++ ;
#endif

    /* ---------------------------------------------------------------------- */
    /* allocate workspace on the GPU */
    /* ---------------------------------------------------------------------- */

    iHostBuff = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS;
    iDevBuff = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS;

    /* cycle the device Lx buffer, d_Lx, through CHOLMOD_DEVICE_STREAMS,
       usually 2, so we can overlap the copy of this descendent supernode
       with the compute of the previous descendant supernode */
    devPtrLx = (double *)(gpu_p->d_Lx[iDevBuff]);

    /* very little overlap between kernels for difference descendant
       supernodes (since we enforce the supernodes must be large enough to
       fill the device) so we only need one C buffer */
    devPtrC = (double *)(gpu_p->d_C);

    /* ---------------------------------------------------------------------- */
    /* copy Lx to the GPU */
    /* ---------------------------------------------------------------------- */

    /* copy host data to pinned buffer first for better H2D bandwidth */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) if (ndcol > 32)
    for ( icol=0; icol<ndcol; icol++ ) {
        for ( irow=0; irow<ndrow2*L_ENTRY; irow++ ) {
            /* pack the ndrow2-by-ndcol block of L contiguously (lda ndrow2) */
            gpu_p->h_Lx[iHostBuff][icol*ndrow2*L_ENTRY+irow] =
                Lx[pdx1*L_ENTRY+icol*ndrow*L_ENTRY + irow];
        }
    }

    cudaStat[0] = cudaMemcpyAsync ( devPtrLx,
        gpu_p->h_Lx[iHostBuff],
        ndrow2*ndcol*L_ENTRY*sizeof(devPtrLx[0]),
        cudaMemcpyHostToDevice,
        Common->gpuStream[iDevBuff] );

    if ( cudaStat[0] ) {
        CHOLMOD_GPU_PRINTF ((" ERROR cudaMemcpyAsync = %d \n", cudaStat[0]));
        return (0);
    }

    /* make the current stream wait for kernels in previous streams */
    cudaStreamWaitEvent ( Common->gpuStream[iDevBuff],
                          Common->updateCKernelsComplete, 0 ) ;

    /* ---------------------------------------------------------------------- */
    /* create the relative map for this descendant supernode */
    /* ---------------------------------------------------------------------- */

    createRelativeMapOnDevice ( (Int *)(gpu_p->d_Map),
                                (Int *)(gpu_p->d_Ls),
                                (Int *)(gpu_p->d_RelativeMap),
                                pdi1, ndrow2,
                                &(Common->gpuStream[iDevBuff]) );

    /* ---------------------------------------------------------------------- */
    /* do the CUDA SYRK : C1 = L1*L1' (lower triangle only) */
    /* ---------------------------------------------------------------------- */

    cublasStatus = cublasSetStream (Common->cublasHandle,
                                    Common->gpuStream[iDevBuff]) ;
    if (cublasStatus != CUBLAS_STATUS_SUCCESS) {
        ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS stream") ;
    }
    alpha  = 1.0 ;
    beta   = 0.0 ;

#ifdef REAL
    cublasStatus = cublasDsyrk (Common->cublasHandle,
        CUBLAS_FILL_MODE_LOWER,
        CUBLAS_OP_N,
        (int) ndrow1, (int) ndcol,      /* N, K: L1 is ndrow1-by-ndcol */
        &alpha,                         /* ALPHA: 1 */
        devPtrLx, ndrow2,               /* A, LDA: L1, ndrow2 */
        &beta,                          /* BETA: 0 */
        devPtrC, ndrow2) ;              /* C, LDC: C1 */
#else
    cublasStatus = cublasZherk (Common->cublasHandle,
        CUBLAS_FILL_MODE_LOWER,
        CUBLAS_OP_N,
        (int) ndrow1, (int) ndcol,      /* N, K: L1 is ndrow1-by-ndcol*/
        &alpha,                         /* ALPHA: 1 */
        (const cuDoubleComplex *) devPtrLx,
        ndrow2,                         /* A, LDA: L1, ndrow2 */
        &beta,                          /* BETA: 0 */
        (cuDoubleComplex *) devPtrC,
        ndrow2) ;                       /* C, LDC: C1 */
#endif

    if (cublasStatus != CUBLAS_STATUS_SUCCESS) {
        ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
    }

#ifndef NTIMER
    Common->CHOLMOD_GPU_SYRK_TIME += SuiteSparse_time() - Common->syrkStart;
#endif

    /* ---------------------------------------------------------------------- */
    /* compute remaining (ndrow2-ndrow1)-by-ndrow1 block of C, C2 = L2*L1' */
    /* ---------------------------------------------------------------------- */

#ifndef NTIMER
    Common->CHOLMOD_GPU_GEMM_CALLS++ ;
    tstart = SuiteSparse_time();
#endif

    if (ndrow3 > 0)
    {
#ifndef REAL
        cuDoubleComplex calpha  = {1.0,0.0} ;
        cuDoubleComplex cbeta   = {0.0,0.0} ;
#endif

        /* ------------------------------------------------------------------ */
        /* do the CUDA BLAS dgemm */
        /* ------------------------------------------------------------------ */

#ifdef REAL
        alpha  = 1.0 ;
        beta   = 0.0 ;

        cublasStatus = cublasDgemm (Common->cublasHandle,
            CUBLAS_OP_N, CUBLAS_OP_T,
            ndrow3, ndrow1, ndcol,          /* M, N, K */
            &alpha,                         /* ALPHA:  1 */
            devPtrLx + L_ENTRY*(ndrow1),    /* A, LDA: L2*/
            ndrow2,                         /* ndrow */
            devPtrLx,                       /* B, LDB: L1 */
            ndrow2,                         /* ndrow */
            &beta,                          /* BETA:   0 */
            devPtrC + L_ENTRY*ndrow1,       /* C, LDC: C2 */
            ndrow2) ;
#else
        cublasStatus = cublasZgemm (Common->cublasHandle,
            CUBLAS_OP_N, CUBLAS_OP_C,
            ndrow3, ndrow1, ndcol,          /* M, N, K */
            &calpha,                        /* ALPHA:  1 */
            (const cuDoubleComplex*) devPtrLx + ndrow1,
            ndrow2,                         /* ndrow */
            (const cuDoubleComplex *) devPtrLx,
            ndrow2,                         /* ndrow */
            &cbeta,                         /* BETA:   0 */
            (cuDoubleComplex *)devPtrC + ndrow1,
            ndrow2) ;
#endif

        if (cublasStatus != CUBLAS_STATUS_SUCCESS) {
            ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
        }

    }

#ifndef NTIMER
    Common->CHOLMOD_GPU_GEMM_TIME += SuiteSparse_time() - tstart;
#endif

    /* ------------------------------------------------------------------ */
    /* Assemble the update C on the device using the d_RelativeMap */
    /* ------------------------------------------------------------------ */

#ifdef REAL
    addUpdateOnDevice ( gpu_p->d_A[0], devPtrC,
        gpu_p->d_RelativeMap, ndrow1, ndrow2, nsrow,
        &(Common->gpuStream[iDevBuff]) );
#else
    addComplexUpdateOnDevice ( gpu_p->d_A[0], devPtrC,
        gpu_p->d_RelativeMap, ndrow1, ndrow2, nsrow,
        &(Common->gpuStream[iDevBuff]) );
#endif

    /* Record an event indicating that kernels for this descendant are
     * complete; updateCBuffersFree signals the pinned host buffer may be
     * reused */
    cudaEventRecord ( Common->updateCKernelsComplete,
                      Common->gpuStream[iDevBuff]);
    cudaEventRecord ( Common->updateCBuffersFree[iHostBuff],
                      Common->gpuStream[iDevBuff]);

    return (1) ;
}

/* ========================================================================== */
/* === gpu_final_assembly =================================================== */
/* ========================================================================== */

/* If the supernode was assembled on both the CPU and the GPU, this will
 * complete the supernode assembly on both the GPU and CPU.
 */

void TEMPLATE2 (CHOLMOD (gpu_final_assembly))
(
    cholmod_common *Common,
    double *Lx,
    Int psx,
    Int nscol,
    Int nsrow,
    int supernodeUsedGPU,
    int *iHostBuff,
    int *iDevBuff,
    cholmod_gpu_pointers *gpu_p
)
{
    Int iidx, i, j;
    Int iHostBuff2 ;
    Int iDevBuff2 ;

    if ( supernodeUsedGPU ) {

        /* ------------------------------------------------------------------ */
        /* Apply all of the Shur-complement updates, computed on the gpu, to */
        /* the supernode. */
        /* ------------------------------------------------------------------ */

        *iHostBuff = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS;
        *iDevBuff = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS;

        if ( nscol * L_ENTRY >= CHOLMOD_POTRF_LIMIT ) {

            /* If this supernode is going to be factored using the GPU (potrf)
             * then it will need the portion of the update assembled ont the
             * CPU.  So copy that to a pinned buffer an H2D copy to device.
*/ /* wait until a buffer is free */ cudaEventSynchronize ( Common->updateCBuffersFree[*iHostBuff] ); /* copy update assembled on CPU to a pinned buffer */ #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \ private(iidx) if (nscol>32) for ( j=0; j<nscol; j++ ) { for ( i=j; i<nsrow*L_ENTRY; i++ ) { iidx = j*nsrow*L_ENTRY+i; gpu_p->h_Lx[*iHostBuff][iidx] = Lx[psx*L_ENTRY+iidx]; } } /* H2D transfer of update assembled on CPU */ cudaMemcpyAsync ( gpu_p->d_A[1], gpu_p->h_Lx[*iHostBuff], nscol*nsrow*L_ENTRY*sizeof(double), cudaMemcpyHostToDevice, Common->gpuStream[*iDevBuff] ); } Common->ibuffer++; iHostBuff2 = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS; iDevBuff2 = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS; /* wait for all kernels to complete */ cudaEventSynchronize( Common->updateCKernelsComplete ); /* copy assembled Schur-complement updates computed on GPU */ cudaMemcpyAsync ( gpu_p->h_Lx[iHostBuff2], gpu_p->d_A[0], nscol*nsrow*L_ENTRY*sizeof(double), cudaMemcpyDeviceToHost, Common->gpuStream[iDevBuff2] ); if ( nscol * L_ENTRY >= CHOLMOD_POTRF_LIMIT ) { /* with the current implementation, potrf still uses data from the * CPU - so put the fully assembled supernode in a pinned buffer for * fastest access */ /* need both H2D and D2H copies to be complete */ cudaDeviceSynchronize(); /* sum updates from cpu and device on device */ #ifdef REAL sumAOnDevice ( gpu_p->d_A[1], gpu_p->d_A[0], -1.0, nsrow, nscol ); #else sumComplexAOnDevice ( gpu_p->d_A[1], gpu_p->d_A[0], -1.0, nsrow, nscol ); #endif /* place final assembled supernode in pinned buffer */ #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \ private(iidx) if (nscol>32) for ( j=0; j<nscol; j++ ) { for ( i=j*L_ENTRY; i<nscol*L_ENTRY; i++ ) { iidx = j*nsrow*L_ENTRY+i; gpu_p->h_Lx[*iHostBuff][iidx] -= gpu_p->h_Lx[iHostBuff2][iidx]; } } } else { /* assemble with CPU updates */ cudaDeviceSynchronize(); #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \ private(iidx) if (nscol>32) 
for ( j=0; j<nscol; j++ ) { for ( i=j*L_ENTRY; i<nsrow*L_ENTRY; i++ ) { iidx = j*nsrow*L_ENTRY+i; Lx[psx*L_ENTRY+iidx] -= gpu_p->h_Lx[iHostBuff2][iidx]; } } } } return; } /* ========================================================================== */ /* === gpu_lower_potrf ====================================================== */ /* ========================================================================== */ /* Cholesky factorzation (dpotrf) of a matrix S, operating on the lower * triangular part only. S is nscol2-by-nscol2 with leading dimension nsrow. * * S is the top part of the supernode (the lower triangular matrx). * This function also copies the bottom rectangular part of the supernode (B) * onto the GPU, in preparation for gpu_triangular_solve. */ /* * On entry, d_A[1] contains the fully assembled supernode */ int TEMPLATE2 (CHOLMOD (gpu_lower_potrf)) ( Int nscol2, /* S is nscol2-by-nscol2 */ Int nsrow, /* leading dimension of S */ Int psx, /* S is located at Lx + L_ENTRY*psx */ double *Lx, /* contains S; overwritten with Cholesky factor */ Int *info, /* BLAS info return value */ cholmod_common *Common, cholmod_gpu_pointers *gpu_p ) { double *devPtrA, *devPtrB, *A ; double alpha, beta ; cudaError_t cudaStat ; cublasStatus_t cublasStatus ; Int j, nsrow2, nb, n, gpu_lda, lda, gpu_ldb ; int ilda, ijb, iinfo ; #ifndef NTIMER double tstart ; #endif if (nscol2 * L_ENTRY < CHOLMOD_POTRF_LIMIT) { /* too small for the CUDA BLAS; use the CPU instead */ return (0) ; } #ifndef NTIMER tstart = SuiteSparse_time ( ) ; Common->CHOLMOD_GPU_POTRF_CALLS++ ; #endif nsrow2 = nsrow - nscol2 ; /* ---------------------------------------------------------------------- */ /* heuristic to get the block size depending of the problem size */ /* ---------------------------------------------------------------------- */ nb = 128 ; if (nscol2 > 4096) nb = 256 ; if (nscol2 > 8192) nb = 384 ; n = nscol2 ; gpu_lda = ((nscol2+31)/32)*32 ; lda = nsrow ; A = 
gpu_p->h_Lx[(Common->ibuffer+CHOLMOD_HOST_SUPERNODE_BUFFERS-1)%
                    CHOLMOD_HOST_SUPERNODE_BUFFERS];

    /* ---------------------------------------------------------------------- */
    /* determine the GPU leading dimension of B */
    /* ---------------------------------------------------------------------- */

    gpu_ldb = 0 ;
    if (nsrow2 > 0)
    {
        gpu_ldb = ((nsrow2+31)/32)*32 ;
    }

    /* ---------------------------------------------------------------------- */
    /* remember where device memory is, to be used by triangular solve later */
    /* ---------------------------------------------------------------------- */

    devPtrA = gpu_p->d_Lx[0];
    devPtrB = gpu_p->d_Lx[1];

    /* ---------------------------------------------------------------------- */
    /* copy A from device to device */
    /* ---------------------------------------------------------------------- */

    cudaStat = cudaMemcpy2DAsync ( devPtrA,
       gpu_lda * L_ENTRY * sizeof (devPtrA[0]),
       gpu_p->d_A[1],
       nsrow * L_ENTRY * sizeof (Lx[0]),
       nscol2 * L_ENTRY * sizeof (devPtrA[0]),
       nscol2,
       cudaMemcpyDeviceToDevice,
       Common->gpuStream[0] );

    if ( cudaStat ) {
        ERROR ( CHOLMOD_GPU_PROBLEM, "GPU memcopy device to device");
    }

    /* ---------------------------------------------------------------------- */
    /* copy B in advance, for gpu_triangular_solve */
    /* ---------------------------------------------------------------------- */

    if (nsrow2 > 0)
    {
        cudaStat = cudaMemcpy2DAsync (devPtrB,
            gpu_ldb * L_ENTRY * sizeof (devPtrB [0]),
            gpu_p->d_A[1] + L_ENTRY*nscol2,
            nsrow * L_ENTRY * sizeof (Lx [0]),
            nsrow2 * L_ENTRY * sizeof (devPtrB [0]),
            nscol2,
            cudaMemcpyDeviceToDevice,
            Common->gpuStream[0]) ;
        if (cudaStat)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ;
        }
    }

    /* ------------------------------------------------------------------ */
    /* define the dpotrf stream */
    /* ------------------------------------------------------------------ */

    cublasStatus = cublasSetStream (Common->cublasHandle,
                                    Common->gpuStream [0]) ;
    if (cublasStatus != CUBLAS_STATUS_SUCCESS)
    {
        ERROR
            (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS stream") ;
    }

    /* ---------------------------------------------------------------------- */
    /* block Cholesky factorization of S */
    /* ---------------------------------------------------------------------- */

    /* right-looking blocked factorization: SYRK + GEMM update the trailing
     * panel on the GPU; the jb-by-jb diagonal block is factored by LAPACK on
     * the CPU while the GPU continues.  Streams [0] and [1] overlap the
     * device compute with the device<->host copies of the current panel. */

    for (j = 0 ; j < n ; j += nb)
    {
        Int jb = nb < (n-j) ? nb : (n-j) ;

        /* ------------------------------------------------------------------ */
        /* do the CUDA BLAS dsyrk */
        /* ------------------------------------------------------------------ */

        alpha = -1.0 ;
        beta = 1.0 ;
#ifdef REAL
        cublasStatus = cublasDsyrk (Common->cublasHandle,
            CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N,
            jb, j,
            &alpha, devPtrA + j, gpu_lda,
            &beta, devPtrA + j + j*gpu_lda, gpu_lda) ;
#else
        cublasStatus = cublasZherk (Common->cublasHandle,
            CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N,
            jb, j,
            &alpha,
            (cuDoubleComplex*)devPtrA + j,
            gpu_lda,
            &beta,
            (cuDoubleComplex*)devPtrA + j + j*gpu_lda,
            gpu_lda) ;
#endif

        if (cublasStatus != CUBLAS_STATUS_SUCCESS)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
        }

        /* ------------------------------------------------------------------ */

        cudaStat = cudaEventRecord (Common->cublasEventPotrf [0],
                                    Common->gpuStream [0]) ;
        if (cudaStat)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ;
        }

        cudaStat = cudaStreamWaitEvent (Common->gpuStream [1],
                                        Common->cublasEventPotrf [0], 0) ;
        if (cudaStat)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ;
        }

        /* ------------------------------------------------------------------ */
        /* copy back the jb columns on two different streams */
        /* ------------------------------------------------------------------ */

        cudaStat = cudaMemcpy2DAsync (A + L_ENTRY*(j + j*lda),
            lda * L_ENTRY * sizeof (double),
            devPtrA + L_ENTRY*(j + j*gpu_lda),
            gpu_lda * L_ENTRY * sizeof (double),
            L_ENTRY * sizeof (double)*jb,
            jb,
            cudaMemcpyDeviceToHost,
            Common->gpuStream [1]) ;

        if (cudaStat)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy from device") ;
        }

        /* ------------------------------------------------------------------ */
        /* do the CUDA BLAS dgemm */
        /* ------------------------------------------------------------------ */

        if ((j+jb) < n)
        {
#ifdef REAL
            alpha = -1.0 ;
            beta = 1.0 ;
            cublasStatus = cublasDgemm (Common->cublasHandle,
                CUBLAS_OP_N, CUBLAS_OP_T,
                (n-j-jb), jb, j,
                &alpha,
                devPtrA + (j+jb), gpu_lda,
                devPtrA + (j) , gpu_lda,
                &beta,
                devPtrA + (j+jb + j*gpu_lda), gpu_lda) ;
#else
            cuDoubleComplex calpha = {-1.0,0.0} ;
            cuDoubleComplex cbeta = { 1.0,0.0} ;
            cublasStatus = cublasZgemm (Common->cublasHandle,
                CUBLAS_OP_N, CUBLAS_OP_C,
                (n-j-jb), jb, j,
                &calpha,
                (cuDoubleComplex*)devPtrA + (j+jb),
                gpu_lda,
                (cuDoubleComplex*)devPtrA + (j),
                gpu_lda,
                &cbeta,
                (cuDoubleComplex*)devPtrA + (j+jb + j*gpu_lda),
                gpu_lda ) ;
#endif
            if (cublasStatus != CUBLAS_STATUS_SUCCESS)
            {
                ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
            }
        }

        /* wait for the D2H copy of the diagonal panel before LAPACK uses it */
        cudaStat = cudaStreamSynchronize (Common->gpuStream [1]) ;
        if (cudaStat)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ;
        }

        /* ------------------------------------------------------------------ */
        /* compute the Cholesky factorization of the jbxjb block on the CPU */
        /* ------------------------------------------------------------------ */

        ilda = (int) lda ;
        ijb = jb ;
#ifdef REAL
        LAPACK_DPOTRF ("L", &ijb, A + L_ENTRY * (j + j*lda), &ilda, &iinfo) ;
#else
        LAPACK_ZPOTRF ("L", &ijb, A + L_ENTRY * (j + j*lda), &ilda, &iinfo) ;
#endif
        *info = iinfo ;

        if (*info != 0)
        {
            /* not positive definite: report the global column index */
            *info = *info + j ;
            break ;
        }

        /* ------------------------------------------------------------------ */
        /* copy the result back to the GPU */
        /* ------------------------------------------------------------------ */

        cudaStat = cudaMemcpy2DAsync (devPtrA + L_ENTRY*(j + j*gpu_lda),
            gpu_lda * L_ENTRY * sizeof (double),
            A + L_ENTRY * (j + j*lda),
            lda * L_ENTRY * sizeof (double),
            L_ENTRY * sizeof (double) * jb,
            jb,
            cudaMemcpyHostToDevice,
            Common->gpuStream [0]) ;

        if (cudaStat)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ;
        }

        /* ------------------------------------------------------------------ */
        /* do the CUDA BLAS dtrsm */
        /* ------------------------------------------------------------------ */

        if ((j+jb) < n)
        {
#ifdef REAL
            alpha = 1.0 ;
            cublasStatus = cublasDtrsm (Common->cublasHandle,
                CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER,
                CUBLAS_OP_T, CUBLAS_DIAG_NON_UNIT,
                (n-j-jb), jb,
                &alpha,
                devPtrA + (j + j*gpu_lda), gpu_lda,
                devPtrA + (j+jb + j*gpu_lda), gpu_lda) ;
#else
            cuDoubleComplex calpha = {1.0,0.0};
            cublasStatus = cublasZtrsm (Common->cublasHandle,
                CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER,
                CUBLAS_OP_C, CUBLAS_DIAG_NON_UNIT,
                (n-j-jb), jb,
                &calpha,
                (cuDoubleComplex *)devPtrA + (j + j*gpu_lda),
                gpu_lda,
                (cuDoubleComplex *)devPtrA + (j+jb + j*gpu_lda),
                gpu_lda) ;
#endif

            if (cublasStatus != CUBLAS_STATUS_SUCCESS)
            {
                ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
            }

            /* -------------------------------------------------------------- */
            /* Copy factored column back to host. */
            /* -------------------------------------------------------------- */

            cudaStat = cudaEventRecord (Common->cublasEventPotrf[2],
                                        Common->gpuStream[0]) ;
            if (cudaStat)
            {
                ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ;
            }

            cudaStat = cudaStreamWaitEvent (Common->gpuStream[1],
                                            Common->cublasEventPotrf[2], 0) ;
            if (cudaStat)
            {
                ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ;
            }

            cudaStat = cudaMemcpy2DAsync (A + L_ENTRY*(j + jb + j * lda),
                lda * L_ENTRY * sizeof (double),
                devPtrA + L_ENTRY* (j + jb + j * gpu_lda),
                gpu_lda * L_ENTRY * sizeof (double),
                L_ENTRY * sizeof (double)* (n - j - jb),
                jb,
                cudaMemcpyDeviceToHost,
                Common->gpuStream[1]) ;

            if (cudaStat)
            {
                ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ;
            }
        }
    }

#ifndef NTIMER
    Common->CHOLMOD_GPU_POTRF_TIME += SuiteSparse_time ( ) - tstart ;
#endif

    return (1) ;
}

/* ========================================================================== */
/* === gpu_triangular_solve ================================================= */
/* ========================================================================== */

/* The current supernode is
columns k1 to k2-1 of L.  Let L1 be the diagonal
 * block (factorized by dpotrf/zpotrf above; rows/cols k1:k2-1), and L2 be rows
 * k2:n-1 and columns k1:k2-1 of L.  The triangular system to solve is L2*L1' =
 * S2, where S2 is overwritten with L2.  More precisely, L2 = S2 / L1' in
 * MATLAB notation.
 */

/* Version with pre-allocation in POTRF */

int TEMPLATE2 (CHOLMOD (gpu_triangular_solve))
(
    Int nsrow2,     /* L1 and S2 are nsrow2-by-nscol2 */
    Int nscol2,     /* L1 is nscol2-by-nscol2 */
    Int nsrow,      /* leading dimension of L1, L2, and S2 */
    Int psx,        /* L1 is at Lx+L_ENTRY*psx;
                     * L2 at Lx+L_ENTRY*(psx+nscol2)*/
    double *Lx,     /* holds L1, L2, and S2 */
    cholmod_common *Common,
    cholmod_gpu_pointers *gpu_p
)
{
    double *devPtrA, *devPtrB ;
    cudaError_t cudaStat ;
    cublasStatus_t cublasStatus ;
    Int gpu_lda, gpu_ldb, gpu_rowstep ;    /* gpu_rowstep appears unused here */

    Int gpu_row_start = 0 ;
    Int gpu_row_max_chunk, gpu_row_chunk;
    int ibuf = 0;
    int iblock = 0;
    /* host buffer chosen by gpu_lower_potrf (ibuffer was incremented since) */
    int iHostBuff = (Common->ibuffer+CHOLMOD_HOST_SUPERNODE_BUFFERS-1) %
        CHOLMOD_HOST_SUPERNODE_BUFFERS;
    int i, j;
    Int iidx;
    int iwrap;

#ifndef NTIMER
    double tstart ;
#endif

#ifdef REAL
    double alpha = 1.0 ;
    gpu_row_max_chunk = 768;
#else
    cuDoubleComplex calpha = {1.0,0.0} ;
    gpu_row_max_chunk = 256;
#endif

    if ( nsrow2 <= 0 )
    {
        /* no rectangular part below the diagonal block: nothing to solve */
        return (0) ;
    }

#ifndef NTIMER
    tstart = SuiteSparse_time ( ) ;
    Common->CHOLMOD_GPU_TRSM_CALLS++ ;
#endif

    gpu_lda = ((nscol2+31)/32)*32 ;
    gpu_ldb = ((nsrow2+31)/32)*32 ;

    /* device buffers left in place by gpu_lower_potrf */
    devPtrA = gpu_p->d_Lx[0];
    devPtrB = gpu_p->d_Lx[1];

    /* make sure the copy of B has completed */
    cudaStreamSynchronize( Common->gpuStream[0] );

    /* ---------------------------------------------------------------------- */
    /* do the CUDA BLAS dtrsm */
    /* ---------------------------------------------------------------------- */

    /* pipeline: solve row chunks of at most gpu_row_max_chunk rows, each on
     * its own stream/buffer, overlapping the TRSM with the D2H copy of the
     * previous chunk; completed chunks are drained into Lx once all
     * CHOLMOD_HOST_SUPERNODE_BUFFERS are in flight */

    while ( gpu_row_start < nsrow2 )
    {
        gpu_row_chunk = nsrow2 - gpu_row_start;
        if ( gpu_row_chunk > gpu_row_max_chunk ) {
            gpu_row_chunk = gpu_row_max_chunk;
        }

        cublasStatus = cublasSetStream ( Common->cublasHandle,
                                         Common->gpuStream[ibuf] );

        if ( cublasStatus != CUBLAS_STATUS_SUCCESS )
        {
            ERROR ( CHOLMOD_GPU_PROBLEM, "GPU CUBLAS stream");
        }

#ifdef REAL
        cublasStatus = cublasDtrsm (Common->cublasHandle,
            CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER,
            CUBLAS_OP_T, CUBLAS_DIAG_NON_UNIT,
            gpu_row_chunk, nscol2,
            &alpha,
            devPtrA, gpu_lda,
            devPtrB + gpu_row_start, gpu_ldb) ;
#else
        cublasStatus = cublasZtrsm (Common->cublasHandle,
            CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER,
            CUBLAS_OP_C, CUBLAS_DIAG_NON_UNIT,
            gpu_row_chunk, nscol2,
            &calpha,
            (const cuDoubleComplex *) devPtrA, gpu_lda,
            (cuDoubleComplex *)devPtrB + gpu_row_start , gpu_ldb) ;
#endif
        if (cublasStatus != CUBLAS_STATUS_SUCCESS)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
        }

        /* ------------------------------------------------------------------ */
        /* copy result back to the CPU */
        /* ------------------------------------------------------------------ */

        cudaStat = cudaMemcpy2DAsync (
            gpu_p->h_Lx[iHostBuff] + L_ENTRY*(nscol2+gpu_row_start),
            nsrow * L_ENTRY * sizeof (Lx [0]),
            devPtrB + L_ENTRY*gpu_row_start,
            gpu_ldb * L_ENTRY * sizeof (devPtrB [0]),
            gpu_row_chunk * L_ENTRY * sizeof (devPtrB [0]),
            nscol2,
            cudaMemcpyDeviceToHost,
            Common->gpuStream[ibuf]);

        if (cudaStat)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy from device") ;
        }

        cudaEventRecord ( Common->updateCBuffersFree[ibuf],
                          Common->gpuStream[ibuf] );

        gpu_row_start += gpu_row_chunk;
        ibuf++;
        ibuf = ibuf % CHOLMOD_HOST_SUPERNODE_BUFFERS;

        iblock ++;

        if ( iblock >= CHOLMOD_HOST_SUPERNODE_BUFFERS )
        {
            Int gpu_row_start2 ;
            Int gpu_row_end ;

            /* then CHOLMOD_HOST_SUPERNODE_BUFFERS worth of work has been
             * scheduled, so check for completed events and copy result into
             * Lx before continuing.
             */
            cudaEventSynchronize ( Common->
                updateCBuffersFree[iblock%CHOLMOD_HOST_SUPERNODE_BUFFERS] );

            /* copy into Lx */
            gpu_row_start2 = nscol2 + (iblock-CHOLMOD_HOST_SUPERNODE_BUFFERS)
                *gpu_row_max_chunk;
            gpu_row_end = gpu_row_start2+gpu_row_max_chunk;

            if ( gpu_row_end > nsrow ) gpu_row_end = nsrow;

#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
    private(iidx) if ( nscol2 > 32 )

            for ( j=0; j<nscol2; j++ ) {
                for ( i=gpu_row_start2*L_ENTRY; i<gpu_row_end*L_ENTRY; i++ ) {
                    iidx = j*nsrow*L_ENTRY+i;
                    Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx];
                }
            }
        }
    }

    /* Convenient to copy the L1 block here */

#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
    private ( iidx ) if ( nscol2 > 32 )

    for ( j=0; j<nscol2; j++ ) {
        for ( i=j*L_ENTRY; i<nscol2*L_ENTRY; i++ ) {
            iidx = j*nsrow*L_ENTRY + i;
            Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx];
        }
    }

    /* now account for the last HSTREAMS buffers */
    for ( iwrap=0; iwrap<CHOLMOD_HOST_SUPERNODE_BUFFERS; iwrap++ )
    {
        int i, j;
        Int gpu_row_start2 = nscol2 + (iblock-CHOLMOD_HOST_SUPERNODE_BUFFERS)
            *gpu_row_max_chunk;
        if (iblock-CHOLMOD_HOST_SUPERNODE_BUFFERS >= 0 &&
            gpu_row_start2 < nsrow )
        {
            Int iidx;
            Int gpu_row_end = gpu_row_start2+gpu_row_max_chunk;
            if ( gpu_row_end > nsrow ) gpu_row_end = nsrow;

            cudaEventSynchronize ( Common->
                updateCBuffersFree[iblock%CHOLMOD_HOST_SUPERNODE_BUFFERS] );

            /* copy into Lx */

#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
    private(iidx) if ( nscol2 > 32 )

            for ( j=0; j<nscol2; j++ ) {
                for ( i=gpu_row_start2*L_ENTRY; i<gpu_row_end*L_ENTRY; i++ ) {
                    iidx = j*nsrow*L_ENTRY+i;
                    Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx];
                }
            }
        }
        iblock++;
    }

    /* ---------------------------------------------------------------------- */
    /* return */
    /* ---------------------------------------------------------------------- */

#ifndef NTIMER
    Common->CHOLMOD_GPU_TRSM_TIME += SuiteSparse_time ( ) - tstart ;
#endif

    return (1) ;
}

/*
========================================================================== */ /* === gpu_copy_supernode =================================================== */ /* ========================================================================== */ /* * In the event gpu_triangular_sovle is not needed / called, this routine * copies the factored diagonal block from the GPU to the CPU. */ void TEMPLATE2 (CHOLMOD (gpu_copy_supernode)) ( cholmod_common *Common, double *Lx, Int psx, Int nscol, Int nscol2, Int nsrow, int supernodeUsedGPU, int iHostBuff, cholmod_gpu_pointers *gpu_p ) { Int iidx, i, j; if ( supernodeUsedGPU && nscol2 * L_ENTRY >= CHOLMOD_POTRF_LIMIT ) { cudaDeviceSynchronize(); #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \ private(iidx,i,j) if (nscol>32) for ( j=0; j<nscol; j++ ) { for ( i=j*L_ENTRY; i<nscol*L_ENTRY; i++ ) { iidx = j*nsrow*L_ENTRY+i; Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx]; } } } return; } #endif #undef REAL #undef COMPLEX #undef ZOMPLEX
sp.c
/*--------------------------------------------------------------------

  NAS Parallel Benchmarks 3.0 structured OpenMP C versions - SP

  This benchmark is an OpenMP C version of the NPB SP code.

  The OpenMP C 2.3 versions are derived by RWCP from the serial
  Fortran versions in "NPB 2.3-serial" developed by NAS. 3.0
  translation is performed by the UVSQ.

  Permission to use, copy, distribute and modify this software for any
  purpose with or without fee is hereby granted.
  This software is provided "as is" without express or implied warranty.

  Information on OpenMP activities at RWCP is available at:

           http://pdplab.trc.rwcp.or.jp/pdperf/Omni/

  Information on NAS Parallel Benchmarks 2.3 is available at:

           http://www.nas.nasa.gov/NAS/NPB/

--------------------------------------------------------------------*/
/*--------------------------------------------------------------------

  Author: R. Van der Wijngaart
          W. Saphir

  OpenMP C version: S. Satoh
  3.0 structure translation: M. Popov

--------------------------------------------------------------------*/

#include "../common/npb-C.h"

/* global variables */
#include "header.h"

/* function declarations */
static void add(void);
static void adi(void);
static void error_norm(double rms[5]);
static void rhs_norm(double rms[5]);
static void exact_rhs(void);
static void exact_solution(double xi, double eta, double zeta,
                           double dtemp[5]);
static void initialize(void);
static void lhsinit(void);
static void lhsx(void);
static void lhsy(void);
static void lhsz(void);
static void ninvr(void);
static void pinvr(void);
static void compute_rhs(void);
static void set_constants(void);
static void txinvr(void);
static void tzetar(void);
static void verify(int no_time_steps, char *class, boolean *verified);
static void x_solve(void);
static void y_solve(void);
static void z_solve(void);

/*--------------------------------------------------------------------
      program SP
c-------------------------------------------------------------------*/
/* Driver: reads optional inputsp.data, runs niter ADI time steps on the
 * global arrays declared in header.h, then verifies and reports MFLOPS. */
int main(int argc,
         char **argv) {

    int niter, step;
    double mflops, tmax;
    int nthreads = 1;
    boolean verified;
    char class;
    FILE *fp;

/*--------------------------------------------------------------------
c      Read input file (if it exists), else take
c      defaults from parameters
c-------------------------------------------------------------------*/
    printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
           " - SP Benchmark\n\n");

    fp = fopen("inputsp.data", "r");
    if (fp != NULL) {
        printf(" Reading from input file inputsp.data\n");
        fscanf(fp, "%d", &niter);
        /* NOTE(review): if the file lacks a trailing newline, fgetc returns
         * EOF forever and this loop never terminates — confirm input format */
        while (fgetc(fp) != '\n');
        fscanf(fp, "%lf", &dt);
        while (fgetc(fp) != '\n');
        fscanf(fp, "%d%d%d",
               &grid_points[0], &grid_points[1], &grid_points[2]);
        fclose(fp);
    } else {
        printf(" No input file inputsp.data. Using compiled defaults");
        niter = NITER_DEFAULT;
        dt = DT_DEFAULT;
        grid_points[0] = PROBLEM_SIZE;
        grid_points[1] = PROBLEM_SIZE;
        grid_points[2] = PROBLEM_SIZE;
    }

    printf(" Size: %3dx%3dx%3d\n",
           grid_points[0], grid_points[1], grid_points[2]);
    printf(" Iterations: %3d dt: %10.6f\n", niter, dt);

    if ( (grid_points[0] > IMAX) ||
         (grid_points[1] > JMAX) ||
         (grid_points[2] > KMAX) ) {
        printf("%d, %d, %d\n",
               grid_points[0], grid_points[1], grid_points[2]);
        printf(" Problem size too big for compiled array sizes\n");
        exit(1);
    }

    set_constants();
    initialize();
    lhsinit();
    exact_rhs();

/*--------------------------------------------------------------------
c      do one time step to touch all code, and reinitialize
c-------------------------------------------------------------------*/
    adi();
    initialize();

    timer_clear(1);
    timer_start(1);

    for (step = 1; step <= niter; step++) {
        if (step % 20 == 0 || step == 1) {
            printf(" Time step %4d\n", step);
        }
        adi();
    }

    {
//#if defined(_OPENMP)
//        nthreads = omp_get_num_threads();
//#endif /* _OPENMP */
    } /* end parallel */

    timer_stop(1);
    tmax = timer_read(1);

    verify(niter, &class, &verified);

    if (tmax != 0) {
        /* MFLOPS model for SP (coefficients from the NPB reference) */
        mflops = ( 881.174 * pow((double)PROBLEM_SIZE, 3.0)
                   - 4683.91 * pow2((double)PROBLEM_SIZE)
                   + 11484.5 * (double)PROBLEM_SIZE
                   - 19272.4) * (double)niter / (tmax*1000000.0);
    } else {
        mflops = 0.0;
    }

    c_print_results("SP", class, grid_points[0],
                    grid_points[1], grid_points[2], niter, nthreads,
                    tmax, mflops, " floating point",
                    verified, NPBVERSION, COMPILETIME,
                    CS1, CS2, CS3, CS4, CS5, CS6, "(none)");
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/* u += rhs over the interior of the grid */
static void add(void) {

    int i, j, k, m;

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c addition of update to the vector u
c-------------------------------------------------------------------*/
/* NOTE(review): the inner "parallel for" pragmas are nested inside the outer
 * parallel region; with OpenMP nested parallelism disabled (the default)
 * they run serially — presumably intentional in this translation, confirm */
#pragma omp parallel for
    for (m = 0; m < 5; m++) {
#pragma omp parallel for
        for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for
            for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for
                for (k = 1; k <= grid_points[2]-2; k++) {
                    u[m][i][j][k] = u[m][i][j][k] + rhs[m][i][j][k];
                }
            }
        }
    }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/* one ADI time step: RHS, then factored solves in x, y, z, then update */
static void adi(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

    compute_rhs();
    txinvr();
    x_solve();
    y_solve();
    z_solve();
    add();
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/* rms[m] = RMS of (computed u - exact solution), per component m */
static void error_norm(double rms[5]) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c this function computes the norm of the difference between the
c computed solution and the exact solution
c-------------------------------------------------------------------*/

    int i, j, k, m, d;
    double xi, eta, zeta, u_exact[5], add;

#pragma omp parallel for
    for (m = 0; m < 5; m++) {
        rms[m] = 0.0;
    }

    for (i = 0; i <= grid_points[0]-1; i++) {
        xi = (double)i * dnxm1;
        for (j = 0; j <= grid_points[1]-1; j++) {
            eta = (double)j * dnym1;
            for (k = 0; k <= grid_points[2]-1; k++) {
                zeta = (double)k * dnzm1;
                exact_solution(xi, eta, zeta, u_exact);
#pragma omp parallel for private(add)
                for (m = 0; m < 5; m++) {
                    add = u[m][i][j][k] - u_exact[m];
                    rms[m] = rms[m] + add*add;
                }
            }
        }
    }

/* normalize by interior volume and take the square root.
 * NOTE(review): the "parallel for" over d repeatedly updates rms[m]; it is
 * only safe because nested parallelism is off by default — verify */
#pragma omp parallel for
    for (m = 0; m < 5; m++) {
#pragma omp parallel for
        for (d = 0; d < 3; d++) {
            rms[m] = rms[m] / (double)(grid_points[d]-2);
        }
        rms[m] = sqrt(rms[m]);
    }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/* rms[m] = RMS norm of the right-hand side, per component m */
static void rhs_norm(double rms[5]) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

    int i, j, k, d, m;
    double add;

#pragma omp parallel for
    for (m = 0; m < 5; m++) {
        rms[m] = 0.0;
    }

/* NOTE(review): these loops start at 0, not 1 — they include one boundary
 * plane; verify against the reference implementation */
//#pragma omp parallel for
    for (i = 0; i <= grid_points[0]-2; i++) {
//#pragma omp parallel for
        for (j = 0; j <= grid_points[1]-2; j++) {
//#pragma omp parallel for
            for (k = 0; k <= grid_points[2]-2; k++) {
#pragma omp parallel for private(add)
                for (m = 0; m < 5; m++) {
                    add = rhs[m][i][j][k];
                    rms[m] = rms[m] + add*add;
                }
            }
        }
    }

#pragma omp parallel for
    for (m = 0; m < 5; m++) {
#pragma omp parallel for
        for (d = 0; d < 3; d++) {
            rms[m] = rms[m] / (double)(grid_points[d]-2);
        }
        rms[m] = sqrt(rms[m]);
    }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/* build the forcing term so the chosen exact solution satisfies the
 * discrete equations (continues below) */
static void exact_rhs(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c compute the right hand side based on exact solution
c-------------------------------------------------------------------*/

    double dtemp[5], xi, eta, zeta, dtpp;
    int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;

/*--------------------------------------------------------------------
c initialize
c-------------------------------------------------------------------*/
#pragma omp parallel for
    for (m = 0; m < 5; m++) {
#pragma omp parallel for
        for (i = 0; i <= grid_points[0]-1; i++) {
#pragma omp parallel for
            for (j = 0; j <= grid_points[1]-1; j++) {
#pragma omp parallel for
                for (k= 0; k <= grid_points[2]-1; k++) {
                    forcing[m][i][j][k] = 0.0;
                }
            }
        }
    }

/*--------------------------------------------------------------------
c xi-direction flux differences
c-------------------------------------------------------------------*/
    for (k = 1; k <= grid_points[2]-2; k++) {
        zeta = (double)k * dnzm1;
        for (j = 1; j <= grid_points[1]-2; j++) {
            eta = (double)j * dnym1;

            /* evaluate the exact solution along the i-pencil and cache
             * per-point quantities (ue, buf, cuf, q) for the stencils below */
            for (i = 0; i <= grid_points[0]-1; i++) {
                xi = (double)i * dnxm1;

                exact_solution(xi, eta, zeta, dtemp);
#pragma omp parallel for
                for (m = 0; m < 5; m++) {
                    ue[m][i] = dtemp[m];
                }
                dtpp = 1.0 / dtemp[0];
#pragma omp parallel for
                for (m = 1; m < 5; m++) {
                    buf[m][i] = dtpp * dtemp[m];
                }

                cuf[i] = buf[1][i] * buf[1][i];
                buf[0][i] = cuf[i] + buf[2][i] * buf[2][i] +
                            buf[3][i] * buf[3][i];
                q[i] = 0.5 * (buf[1][i]*ue[1][i] + buf[2][i]*ue[2][i] +
                              buf[3][i]*ue[3][i]);
            }

            /* second-order flux differences in the xi direction */
#pragma omp parallel for private(im1, ip1)
            for (i = 1; i <= grid_points[0]-2; i++) {
                im1 = i-1;
                ip1 = i+1;

                forcing[0][i][j][k] = forcing[0][i][j][k] -
                    tx2*( ue[1][ip1]-ue[1][im1] )+
                    dx1tx1*(ue[0][ip1]-2.0*ue[0][i]+ue[0][im1]);

                forcing[1][i][j][k] = forcing[1][i][j][k] -
                    tx2 * ((ue[1][ip1]*buf[1][ip1]+c2*(ue[4][ip1]-q[ip1]))-
                           (ue[1][im1]*buf[1][im1]+c2*(ue[4][im1]-q[im1])))+
                    xxcon1*(buf[1][ip1]-2.0*buf[1][i]+buf[1][im1])+
                    dx2tx1*( ue[1][ip1]-2.0* ue[1][i]+ue[1][im1]);

                forcing[2][i][j][k] = forcing[2][i][j][k] -
                    tx2 * (ue[2][ip1]*buf[1][ip1]-ue[2][im1]*buf[1][im1])+
                    xxcon2*(buf[2][ip1]-2.0*buf[2][i]+buf[2][im1])+
                    dx3tx1*( ue[2][ip1]-2.0*ue[2][i] +ue[2][im1]);

                forcing[3][i][j][k] = forcing[3][i][j][k] -
                    tx2*(ue[3][ip1]*buf[1][ip1]-ue[3][im1]*buf[1][im1])+
                    xxcon2*(buf[3][ip1]-2.0*buf[3][i]+buf[3][im1])+
                    dx4tx1*( ue[3][ip1]-2.0* ue[3][i]+ ue[3][im1]);

                forcing[4][i][j][k] = forcing[4][i][j][k] -
                    tx2*(buf[1][ip1]*(c1*ue[4][ip1]-c2*q[ip1])-
                         buf[1][im1]*(c1*ue[4][im1]-c2*q[im1]))+
                    0.5*xxcon3*(buf[0][ip1]-2.0*buf[0][i]+
                                buf[0][im1])+
                    xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+
                    xxcon5*(buf[4][ip1]-2.0*buf[4][i]+buf[4][im1])+
                    dx5tx1*( ue[4][ip1]-2.0* ue[4][i]+ ue[4][im1]);
            }

/*--------------------------------------------------------------------
c Fourth-order dissipation
c-------------------------------------------------------------------*/
            /* one-sided stencils at the low boundary (i = 1, 2) */
#pragma omp parallel for private(i)
            for (m = 0; m < 5; m++) {
                i = 1;
                forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
                    (5.0*ue[m][i] - 4.0*ue[m][i+1] +ue[m][i+2]);
                i = 2;
                forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
                    (-4.0*ue[m][i-1] + 6.0*ue[m][i] -
                     4.0*ue[m][i+1] + ue[m][i+2]);
            }

            /* interior 5-point dissipation stencil */
#pragma omp parallel for
            for (m = 0; m < 5; m++) {
#pragma omp parallel for
                for (i = 3; i <= grid_points[0]-4; i++) {
                    forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*
                        (ue[m][i-2] - 4.0*ue[m][i-1] +
                         6.0*ue[m][i] - 4.0*ue[m][i+1] + ue[m][i+2]);
                }
            }

            /* one-sided stencils at the high boundary */
#pragma omp parallel for private(i)
            for (m = 0; m < 5; m++) {
                i = grid_points[0]-3;
                forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
                    (ue[m][i-2] - 4.0*ue[m][i-1] +
                     6.0*ue[m][i] - 4.0*ue[m][i+1]);
                i = grid_points[0]-2;
                forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
                    (ue[m][i-2] - 4.0*ue[m][i-1] + 5.0*ue[m][i]);
            }
        }
    }

/*--------------------------------------------------------------------
c eta-direction flux differences
c-------------------------------------------------------------------*/
    for (k = 1; k <= grid_points[2]-2; k++) {
        zeta = (double)k * dnzm1;
        for (i = 1; i <= grid_points[0]-2; i++) {
            xi = (double)i * dnxm1;

            for (j = 0; j <= grid_points[1]-1; j++) {
                eta = (double)j * dnym1;
                exact_solution(xi, eta, zeta, dtemp);
#pragma omp parallel for
                for (m = 0; m < 5; m++) {
                    ue[m][j] = dtemp[m];
                }
                dtpp = 1.0/dtemp[0];
#pragma omp parallel for
                for (m = 1; m < 5; m++) {
                    buf[m][j] = dtpp * dtemp[m];
                }

                cuf[j] = buf[2][j] * buf[2][j];
                buf[0][j] = cuf[j] + buf[1][j] * buf[1][j] +
                            buf[3][j] * buf[3][j];
                q[j] = 0.5*(buf[1][j]*ue[1][j] + buf[2][j]*ue[2][j] +
                            buf[3][j]*ue[3][j]);
            }

#pragma omp parallel for private(jm1, jp1)
            for (j = 1; j <= grid_points[1]-2; j++) {
                jm1 = j-1;
                jp1 = j+1;

                forcing[0][i][j][k] = forcing[0][i][j][k] -
                    ty2*( ue[2][jp1]-ue[2][jm1] )+
                    dy1ty1*(ue[0][jp1]-2.0*ue[0][j]+ue[0][jm1]);

                forcing[1][i][j][k] = forcing[1][i][j][k] -
                    ty2*(ue[1][jp1]*buf[2][jp1]-ue[1][jm1]*buf[2][jm1])+
                    yycon2*(buf[1][jp1]-2.0*buf[1][j]+buf[1][jm1])+
                    dy2ty1*( ue[1][jp1]-2.0* ue[1][j]+ ue[1][jm1]);

                forcing[2][i][j][k] = forcing[2][i][j][k] -
                    ty2*((ue[2][jp1]*buf[2][jp1]+c2*(ue[4][jp1]-q[jp1]))-
                         (ue[2][jm1]*buf[2][jm1]+c2*(ue[4][jm1]-q[jm1])))+
                    yycon1*(buf[2][jp1]-2.0*buf[2][j]+buf[2][jm1])+
                    dy3ty1*( ue[2][jp1]-2.0*ue[2][j] +ue[2][jm1]);

                forcing[3][i][j][k] = forcing[3][i][j][k] -
                    ty2*(ue[3][jp1]*buf[2][jp1]-ue[3][jm1]*buf[2][jm1])+
                    yycon2*(buf[3][jp1]-2.0*buf[3][j]+buf[3][jm1])+
                    dy4ty1*( ue[3][jp1]-2.0*ue[3][j]+ ue[3][jm1]);

                forcing[4][i][j][k] = forcing[4][i][j][k] -
                    ty2*(buf[2][jp1]*(c1*ue[4][jp1]-c2*q[jp1])-
                         buf[2][jm1]*(c1*ue[4][jm1]-c2*q[jm1]))+
                    0.5*yycon3*(buf[0][jp1]-2.0*buf[0][j]+
                                buf[0][jm1])+
                    yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+
                    yycon5*(buf[4][jp1]-2.0*buf[4][j]+buf[4][jm1])+
                    dy5ty1*(ue[4][jp1]-2.0*ue[4][j]+ue[4][jm1]);
            }

/*--------------------------------------------------------------------
c Fourth-order dissipation
c-------------------------------------------------------------------*/
#pragma omp parallel for private(j)
            for (m = 0; m < 5; m++) {
                j = 1;
                forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
                    (5.0*ue[m][j] - 4.0*ue[m][j+1] +ue[m][j+2]);
                j = 2;
                forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
                    (-4.0*ue[m][j-1] + 6.0*ue[m][j] -
                     4.0*ue[m][j+1] + ue[m][j+2]);
            }

#pragma omp parallel for
            for (m = 0; m < 5; m++) {
#pragma omp parallel for
                for (j = 3; j <= grid_points[1]-4; j++) {
                    forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*
                        (ue[m][j-2] - 4.0*ue[m][j-1] +
                         6.0*ue[m][j] - 4.0*ue[m][j+1] + ue[m][j+2]);
                }
            }

#pragma omp parallel for private(j)
            for (m = 0; m < 5; m++) {
                j = grid_points[1]-3;
                forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
                    (ue[m][j-2] - 4.0*ue[m][j-1] +
                     6.0*ue[m][j] - 4.0*ue[m][j+1]);
                j = grid_points[1]-2;
                forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
                    (ue[m][j-2] - 4.0*ue[m][j-1] + 5.0*ue[m][j]);
            }
        }
    }

/*--------------------------------------------------------------------
c zeta-direction flux differences
c-------------------------------------------------------------------*/
    for (j = 1; j <= grid_points[1]-2; j++) {
        eta = (double)j * dnym1;
        for (i = 1; i <= grid_points[0]-2; i++) {
            xi = (double)i * dnxm1;

            for (k = 0; k <= grid_points[2]-1; k++) {
                zeta = (double)k * dnzm1;
                exact_solution(xi, eta, zeta, dtemp);
#pragma omp parallel for
                for (m = 0; m < 5; m++) {
                    ue[m][k] = dtemp[m];
                }
                dtpp = 1.0/dtemp[0];
#pragma omp parallel for
                for (m = 1; m < 5; m++) {
                    buf[m][k] = dtpp * dtemp[m];
                }

                cuf[k] = buf[3][k] * buf[3][k];
                buf[0][k] = cuf[k] + buf[1][k] * buf[1][k] +
                            buf[2][k] * buf[2][k];
                q[k] = 0.5*(buf[1][k]*ue[1][k] + buf[2][k]*ue[2][k] +
                            buf[3][k]*ue[3][k]);
            }

#pragma omp parallel for private(km1, kp1)
            for (k = 1; k <= grid_points[2]-2; k++) {
                km1 = k-1;
                kp1 = k+1;

                forcing[0][i][j][k] = forcing[0][i][j][k] -
                    tz2*( ue[3][kp1]-ue[3][km1] )+
                    dz1tz1*(ue[0][kp1]-2.0*ue[0][k]+ue[0][km1]);

                forcing[1][i][j][k] = forcing[1][i][j][k] -
                    tz2 * (ue[1][kp1]*buf[3][kp1]-ue[1][km1]*buf[3][km1])+
                    zzcon2*(buf[1][kp1]-2.0*buf[1][k]+buf[1][km1])+
                    dz2tz1*( ue[1][kp1]-2.0* ue[1][k]+ ue[1][km1]);

                forcing[2][i][j][k] = forcing[2][i][j][k] -
                    tz2 * (ue[2][kp1]*buf[3][kp1]-ue[2][km1]*buf[3][km1])+
                    zzcon2*(buf[2][kp1]-2.0*buf[2][k]+buf[2][km1])+
                    dz3tz1*(ue[2][kp1]-2.0*ue[2][k]+ue[2][km1]);

                forcing[3][i][j][k] = forcing[3][i][j][k] -
                    tz2 * ((ue[3][kp1]*buf[3][kp1]+c2*(ue[4][kp1]-q[kp1]))-
                           (ue[3][km1]*buf[3][km1]+c2*(ue[4][km1]-q[km1])))+
                    zzcon1*(buf[3][kp1]-2.0*buf[3][k]+buf[3][km1])+
                    dz4tz1*( ue[3][kp1]-2.0*ue[3][k] +ue[3][km1]);

                forcing[4][i][j][k] = forcing[4][i][j][k] -
                    tz2 * (buf[3][kp1]*(c1*ue[4][kp1]-c2*q[kp1])-
                           buf[3][km1]*(c1*ue[4][km1]-c2*q[km1]))+
                    0.5*zzcon3*(buf[0][kp1]-2.0*buf[0][k]
                                +buf[0][km1])+
                    zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+
                    zzcon5*(buf[4][kp1]-2.0*buf[4][k]+buf[4][km1])+
                    dz5tz1*( ue[4][kp1]-2.0*ue[4][k]+ ue[4][km1]);
            }

/*--------------------------------------------------------------------
c Fourth-order dissipation
c-------------------------------------------------------------------*/
#pragma omp parallel for private(k)
            for (m = 0; m < 5; m++) {
                k = 1;
                forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
                    (5.0*ue[m][k] - 4.0*ue[m][k+1] +ue[m][k+2]);
                k = 2;
                forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
                    (-4.0*ue[m][k-1] + 6.0*ue[m][k] -
                     4.0*ue[m][k+1] + ue[m][k+2]);
            }

#pragma omp parallel for
            for (m = 0; m < 5; m++) {
#pragma omp parallel for
                for (k = 3; k <= grid_points[2]-4; k++) {
                    forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*
                        (ue[m][k-2] - 4.0*ue[m][k-1] +
                         6.0*ue[m][k] - 4.0*ue[m][k+1] + ue[m][k+2]);
                }
            }

#pragma omp parallel for private(k)
            for (m = 0; m < 5; m++) {
                k = grid_points[2]-3;
                forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
                    (ue[m][k-2] - 4.0*ue[m][k-1] +
                     6.0*ue[m][k] - 4.0*ue[m][k+1]);
                k = grid_points[2]-2;
                forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
                    (ue[m][k-2] - 4.0*ue[m][k-1] + 5.0*ue[m][k]);
            }
        }
    }

/*--------------------------------------------------------------------
c now change the sign of the forcing function,
c-------------------------------------------------------------------*/
#pragma omp parallel for
    for (m = 0; m < 5; m++) {
#pragma omp parallel for
        for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { forcing[m][i][j][k] = -1.0 * forcing[m][i][j][k]; } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void exact_solution(double xi, double eta, double zeta, double dtemp[5]) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c this function returns the exact solution at point xi, eta, zeta c-------------------------------------------------------------------*/ int m; #pragma omp parallel for for (m = 0; m < 5; m++) { dtemp[m] = ce[0][m] + xi*(ce[1][m] + xi*(ce[4][m] + xi*(ce[7][m] + xi*ce[10][m]))) + eta*(ce[2][m] + eta*(ce[5][m] + eta*(ce[8][m] + eta*ce[11][m])))+ zeta*(ce[3][m] + zeta*(ce[6][m] + zeta*(ce[9][m] + zeta*ce[12][m]))); } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void initialize(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c This subroutine initializes the field variable u using c tri-linear transfinite interpolation of the boundary values c-------------------------------------------------------------------*/ int i, j, k, m, ix, iy, iz; double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5]; /*-------------------------------------------------------------------- c Later (in compute_rhs) we compute 1/u for every element. A few of c the corner elements are not used, but it convenient (and faster) c to compute the whole thing with a simple loop. 
Make sure those c values are nonzero by initializing the whole thing here. c-------------------------------------------------------------------*/ #pragma omp parallel for for (i = 0; i <= IMAX-1; i++) { #pragma omp parallel for for (j = 0; j <= IMAX-1; j++) { #pragma omp parallel for for (k = 0; k <= IMAX-1; k++) { u[0][i][j][k] = 1.0; u[1][i][j][k] = 0.0; u[2][i][j][k] = 0.0; u[3][i][j][k] = 0.0; u[4][i][j][k] = 1.0; } } } /*-------------------------------------------------------------------- c first store the "interpolated" values everywhere on the grid c-------------------------------------------------------------------*/ for (i = 0; i <= grid_points[0]-1; i++) { xi = (double)i * dnxm1; for (j = 0; j <= grid_points[1]-1; j++) { eta = (double)j * dnym1; for (k = 0; k <= grid_points[2]-1; k++) { zeta = (double)k * dnzm1; for (ix = 0; ix < 2; ix++) { exact_solution((double)ix, eta, zeta, &Pface[ix][0][0]); } for (iy = 0; iy < 2; iy++) { exact_solution(xi, (double)iy , zeta, &Pface[iy][1][0]); } for (iz = 0; iz < 2; iz++) { exact_solution(xi, eta, (double)iz, &Pface[iz][2][0]); } #pragma omp parallel for private(Pxi ,Peta ,Pzeta) for (m = 0; m < 5; m++) { Pxi = xi * Pface[1][0][m] + (1.0-xi) * Pface[0][0][m]; Peta = eta * Pface[1][1][m] + (1.0-eta) * Pface[0][1][m]; Pzeta = zeta * Pface[1][2][m] + (1.0-zeta) * Pface[0][2][m]; u[m][i][j][k] = Pxi + Peta + Pzeta - Pxi*Peta - Pxi*Pzeta - Peta*Pzeta + Pxi*Peta*Pzeta; } } } } /*-------------------------------------------------------------------- c now store the exact values on the boundaries c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c west face c-------------------------------------------------------------------*/ xi = 0.0; i = 0; for (j = 0; j < grid_points[1]; j++) { eta = (double)j * dnym1; for (k = 0; k < grid_points[2]; k++) { zeta = (double)k * dnzm1; exact_solution(xi, eta, zeta, temp); #pragma omp parallel for for 
(m = 0; m < 5; m++) { u[m][i][j][k] = temp[m]; } } } /*-------------------------------------------------------------------- c east face c-------------------------------------------------------------------*/ xi = 1.0; i = grid_points[0]-1; for (j = 0; j < grid_points[1]; j++) { eta = (double)j * dnym1; for (k = 0; k < grid_points[2]; k++) { zeta = (double)k * dnzm1; exact_solution(xi, eta, zeta, temp); #pragma omp parallel for for (m = 0; m < 5; m++) { u[m][i][j][k] = temp[m]; } } } /*-------------------------------------------------------------------- c south face c-------------------------------------------------------------------*/ eta = 0.0; j = 0; for (i = 0; i < grid_points[0]; i++) { xi = (double)i * dnxm1; for (k = 0; k < grid_points[2]; k++) { zeta = (double)k * dnzm1; exact_solution(xi, eta, zeta, temp); #pragma omp parallel for for (m = 0; m < 5; m++) { u[m][i][j][k] = temp[m]; } } } /*-------------------------------------------------------------------- c north face c-------------------------------------------------------------------*/ eta = 1.0; j = grid_points[1]-1; for (i = 0; i < grid_points[0]; i++) { xi = (double)i * dnxm1; for (k = 0; k < grid_points[2]; k++) { zeta = (double)k * dnzm1; exact_solution(xi, eta, zeta, temp); #pragma omp parallel for for (m = 0; m < 5; m++) { u[m][i][j][k] = temp[m]; } } } /*-------------------------------------------------------------------- c bottom face c-------------------------------------------------------------------*/ zeta = 0.0; k = 0; for (i = 0; i < grid_points[0]; i++) { xi = (double)i *dnxm1; for (j = 0; j < grid_points[1]; j++) { eta = (double)j * dnym1; exact_solution(xi, eta, zeta, temp); #pragma omp parallel for for (m = 0; m < 5; m++) { u[m][i][j][k] = temp[m]; } } } /*-------------------------------------------------------------------- c top face c-------------------------------------------------------------------*/ zeta = 1.0; k = grid_points[2]-1; for (i = 0; i < grid_points[0]; i++) { xi = 
(double)i * dnxm1; for (j = 0; j < grid_points[1]; j++) { eta = (double)j * dnym1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[m][i][j][k] = temp[m]; } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void lhsinit(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ int i, j, k, n; /*-------------------------------------------------------------------- c zap the whole left hand side for starters c-------------------------------------------------------------------*/ #pragma omp parallel for for (n = 0; n < 15; n++) { #pragma omp parallel for for (i = 0; i < grid_points[0]; i++) { #pragma omp parallel for for (j = 0; j < grid_points[1]; j++) { #pragma omp parallel for for (k = 0; k < grid_points[2]; k++) { lhs[n][i][j][k] = 0.0; } } } } /*-------------------------------------------------------------------- c next, set all diagonal values to 1. 
This is overkill, but c convenient c-------------------------------------------------------------------*/ #pragma omp parallel for for (n = 0; n < 3; n++) { #pragma omp parallel for for (i = 0; i < grid_points[0]; i++) { #pragma omp parallel for for (j = 0; j < grid_points[1]; j++) { #pragma omp parallel for for (k = 0; k < grid_points[2]; k++) { lhs[5*n+2][i][j][k] = 1.0; } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void lhsx(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c This function computes the left hand side for the three x-factors c-------------------------------------------------------------------*/ double ru1; int i, j, k; /*-------------------------------------------------------------------- c first fill the lhs for the u-eigenvalue c-------------------------------------------------------------------*/ for (j = 1; j <= grid_points[1]-2; j++) { for (k = 1; k <= grid_points[2]-2; k++) { #pragma omp parallel for private(ru1) for (i = 0; i <= grid_points[0]-1; i++) { ru1 = c3c4*rho_i[i][j][k]; cv[i] = us[i][j][k]; rhon[i] = max(dx2+con43*ru1, max(dx5+c1c5*ru1, max(dxmax+ru1, dx1))); } #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { lhs[0][i][j][k] = 0.0; lhs[1][i][j][k] = - dttx2 * cv[i-1] - dttx1 * rhon[i-1]; lhs[2][i][j][k] = 1.0 + c2dttx1 * rhon[i]; lhs[3][i][j][k] = dttx2 * cv[i+1] - dttx1 * rhon[i+1]; lhs[4][i][j][k] = 0.0; } } } /*-------------------------------------------------------------------- c add fourth order dissipation c-------------------------------------------------------------------*/ i = 1; #pragma omp for for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { lhs[2][i][j][k] = 
lhs[2][i][j][k] + comz5; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[4][i][j][k] = lhs[4][i][j][k] + comz1; lhs[1][i+1][j][k] = lhs[1][i+1][j][k] - comz4; lhs[2][i+1][j][k] = lhs[2][i+1][j][k] + comz6; lhs[3][i+1][j][k] = lhs[3][i+1][j][k] - comz4; lhs[4][i+1][j][k] = lhs[4][i+1][j][k] + comz1; } } #pragma omp parallel for for (i = 3; i <= grid_points[0]-4; i++) { #pragma omp parallel for for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { lhs[0][i][j][k] = lhs[0][i][j][k] + comz1; lhs[1][i][j][k] = lhs[1][i][j][k] - comz4; lhs[2][i][j][k] = lhs[2][i][j][k] + comz6; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[4][i][j][k] = lhs[4][i][j][k] + comz1; } } } i = grid_points[0]-3; #pragma omp parallel for for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { lhs[0][i][j][k] = lhs[0][i][j][k] + comz1; lhs[1][i][j][k] = lhs[1][i][j][k] - comz4; lhs[2][i][j][k] = lhs[2][i][j][k] + comz6; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[0][i+1][j][k] = lhs[0][i+1][j][k] + comz1; lhs[1][i+1][j][k] = lhs[1][i+1][j][k] - comz4; lhs[2][i+1][j][k] = lhs[2][i+1][j][k] + comz5; } } /*-------------------------------------------------------------------- c subsequently, fill the other factors (u+c), (u-c) by adding to c the first c-------------------------------------------------------------------*/ #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { lhs[0+5][i][j][k] = lhs[0][i][j][k]; lhs[1+5][i][j][k] = lhs[1][i][j][k] - dttx2 * speed[i-1][j][k]; lhs[2+5][i][j][k] = lhs[2][i][j][k]; lhs[3+5][i][j][k] = lhs[3][i][j][k] + dttx2 * speed[i+1][j][k]; lhs[4+5][i][j][k] = lhs[4][i][j][k]; lhs[0+10][i][j][k] = lhs[0][i][j][k]; lhs[1+10][i][j][k] = lhs[1][i][j][k] + dttx2 * speed[i-1][j][k]; lhs[2+10][i][j][k] = lhs[2][i][j][k]; 
lhs[3+10][i][j][k] = lhs[3][i][j][k] - dttx2 * speed[i+1][j][k]; lhs[4+10][i][j][k] = lhs[4][i][j][k]; } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void lhsy(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c This function computes the left hand side for the three y-factors c-------------------------------------------------------------------*/ double ru1; int i, j, k; /*-------------------------------------------------------------------- c first fill the lhs for the u-eigenvalue c-------------------------------------------------------------------*/ for (i = 1; i <= grid_points[0]-2; i++) { for (k = 1; k <= grid_points[2]-2; k++) { #pragma omp parallel for private(ru1) for (j = 0; j <= grid_points[1]-1; j++) { ru1 = c3c4*rho_i[i][j][k]; cv[j] = vs[i][j][k]; rhoq[j] = max(dy3 + con43 * ru1, max(dy5 + c1c5*ru1, max(dymax + ru1, dy1))); } #pragma omp parallel for for (j = 1; j <= grid_points[1]-2; j++) { lhs[0][i][j][k] = 0.0; lhs[1][i][j][k] = -dtty2 * cv[j-1] - dtty1 * rhoq[j-1]; lhs[2][i][j][k] = 1.0 + c2dtty1 * rhoq[j]; lhs[3][i][j][k] = dtty2 * cv[j+1] - dtty1 * rhoq[j+1]; lhs[4][i][j][k] = 0.0; } } } /*-------------------------------------------------------------------- c add fourth order dissipation c-------------------------------------------------------------------*/ j = 1; #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { lhs[2][i][j][k] = lhs[2][i][j][k] + comz5; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[4][i][j][k] = lhs[4][i][j][k] + comz1; lhs[1][i][j+1][k] = lhs[1][i][j+1][k] - comz4; lhs[2][i][j+1][k] = lhs[2][i][j+1][k] + comz6; lhs[3][i][j+1][k] = lhs[3][i][j+1][k] - comz4; 
lhs[4][i][j+1][k] = lhs[4][i][j+1][k] + comz1; } } #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for for (j = 3; j <= grid_points[1]-4; j++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { lhs[0][i][j][k] = lhs[0][i][j][k] + comz1; lhs[1][i][j][k] = lhs[1][i][j][k] - comz4; lhs[2][i][j][k] = lhs[2][i][j][k] + comz6; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[4][i][j][k] = lhs[4][i][j][k] + comz1; } } } j = grid_points[1]-3; #pragma omp for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { lhs[0][i][j][k] = lhs[0][i][j][k] + comz1; lhs[1][i][j][k] = lhs[1][i][j][k] - comz4; lhs[2][i][j][k] = lhs[2][i][j][k] + comz6; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[0][i][j+1][k] = lhs[0][i][j+1][k] + comz1; lhs[1][i][j+1][k] = lhs[1][i][j+1][k] - comz4; lhs[2][i][j+1][k] = lhs[2][i][j+1][k] + comz5; } } /*-------------------------------------------------------------------- c subsequently, do the other two factors c-------------------------------------------------------------------*/ #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { lhs[0+5][i][j][k] = lhs[0][i][j][k]; lhs[1+5][i][j][k] = lhs[1][i][j][k] - dtty2 * speed[i][j-1][k]; lhs[2+5][i][j][k] = lhs[2][i][j][k]; lhs[3+5][i][j][k] = lhs[3][i][j][k] + dtty2 * speed[i][j+1][k]; lhs[4+5][i][j][k] = lhs[4][i][j][k]; lhs[0+10][i][j][k] = lhs[0][i][j][k]; lhs[1+10][i][j][k] = lhs[1][i][j][k] + dtty2 * speed[i][j-1][k]; lhs[2+10][i][j][k] = lhs[2][i][j][k]; lhs[3+10][i][j][k] = lhs[3][i][j][k] - dtty2 * speed[i][j+1][k]; lhs[4+10][i][j][k] = lhs[4][i][j][k]; } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void lhsz(void) { 
/*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c This function computes the left hand side for the three z-factors c-------------------------------------------------------------------*/ double ru1; int i, j, k; /*-------------------------------------------------------------------- c first fill the lhs for the u-eigenvalue c-------------------------------------------------------------------*/ for (i = 1; i <= grid_points[0]-2; i++) { for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for private(ru1) for (k = 0; k <= grid_points[2]-1; k++) { ru1 = c3c4*rho_i[i][j][k]; cv[k] = ws[i][j][k]; rhos[k] = max(dz4 + con43 * ru1, max(dz5 + c1c5 * ru1, max(dzmax + ru1, dz1))); } #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { lhs[0][i][j][k] = 0.0; lhs[1][i][j][k] = -dttz2 * cv[k-1] - dttz1 * rhos[k-1]; lhs[2][i][j][k] = 1.0 + c2dttz1 * rhos[k]; lhs[3][i][j][k] = dttz2 * cv[k+1] - dttz1 * rhos[k+1]; lhs[4][i][j][k] = 0.0; } } } /*-------------------------------------------------------------------- c add fourth order dissipation c-------------------------------------------------------------------*/ k = 1; #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for for (j = 1; j <= grid_points[1]-2; j++) { lhs[2][i][j][k] = lhs[2][i][j][k] + comz5; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[4][i][j][k] = lhs[4][i][j][k] + comz1; lhs[1][i][j][k+1] = lhs[1][i][j][k+1] - comz4; lhs[2][i][j][k+1] = lhs[2][i][j][k+1] + comz6; lhs[3][i][j][k+1] = lhs[3][i][j][k+1] - comz4; lhs[4][i][j][k+1] = lhs[4][i][j][k+1] + comz1; } } #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for for (k = 3; k <= grid_points[2]-4; k++) { lhs[0][i][j][k] = lhs[0][i][j][k] + 
comz1; lhs[1][i][j][k] = lhs[1][i][j][k] - comz4; lhs[2][i][j][k] = lhs[2][i][j][k] + comz6; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[4][i][j][k] = lhs[4][i][j][k] + comz1; } } } k = grid_points[2]-3; #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for for (j = 1; j <= grid_points[1]-2; j++) { lhs[0][i][j][k] = lhs[0][i][j][k] + comz1; lhs[1][i][j][k] = lhs[1][i][j][k] - comz4; lhs[2][i][j][k] = lhs[2][i][j][k] + comz6; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[0][i][j][k+1] = lhs[0][i][j][k+1] + comz1; lhs[1][i][j][k+1] = lhs[1][i][j][k+1] - comz4; lhs[2][i][j][k+1] = lhs[2][i][j][k+1] + comz5; } } /*-------------------------------------------------------------------- c subsequently, fill the other factors (u+c), (u-c) c-------------------------------------------------------------------*/ #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { lhs[0+5][i][j][k] = lhs[0][i][j][k]; lhs[1+5][i][j][k] = lhs[1][i][j][k] - dttz2 * speed[i][j][k-1]; lhs[2+5][i][j][k] = lhs[2][i][j][k]; lhs[3+5][i][j][k] = lhs[3][i][j][k] + dttz2 * speed[i][j][k+1]; lhs[4+5][i][j][k] = lhs[4][i][j][k]; lhs[0+10][i][j][k] = lhs[0][i][j][k]; lhs[1+10][i][j][k] = lhs[1][i][j][k] + dttz2 * speed[i][j][k-1]; lhs[2+10][i][j][k] = lhs[2][i][j][k]; lhs[3+10][i][j][k] = lhs[3][i][j][k] - dttz2 * speed[i][j][k+1]; lhs[4+10][i][j][k] = lhs[4][i][j][k]; } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void ninvr(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c block-diagonal matrix-vector multiplication 
c-------------------------------------------------------------------*/ int i, j, k; double r1, r2, r3, r4, r5, t1, t2; #pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ) for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2) for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2) for (k = 1; k <= grid_points[2]-2; k++) { r1 = rhs[0][i][j][k]; r2 = rhs[1][i][j][k]; r3 = rhs[2][i][j][k]; r4 = rhs[3][i][j][k]; r5 = rhs[4][i][j][k]; t1 = bt * r3; t2 = 0.5 * ( r4 + r5 ); rhs[0][i][j][k] = -r2; rhs[1][i][j][k] = r1; rhs[2][i][j][k] = bt * ( r4 - r5 ); rhs[3][i][j][k] = -t1 + t2; rhs[4][i][j][k] = t1 + t2; } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void pinvr(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c block-diagonal matrix-vector multiplication c-------------------------------------------------------------------*/ int i, j, k; double r1, r2, r3, r4, r5, t1, t2; #pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ) for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ) for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for private(r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ) for (k = 1; k <= grid_points[2]-2; k++) { r1 = rhs[0][i][j][k]; r2 = rhs[1][i][j][k]; r3 = rhs[2][i][j][k]; r4 = rhs[3][i][j][k]; r5 = rhs[4][i][j][k]; t1 = bt * r1; t2 = 0.5 * ( r4 + r5 ); rhs[0][i][j][k] = bt * ( r4 - r5 ); rhs[1][i][j][k] = -r3; rhs[2][i][j][k] = r2; rhs[3][i][j][k] = -t1 + t2; rhs[4][i][j][k] = t1 + t2; } } } } /*-------------------------------------------------------------------- 
--------------------------------------------------------------------*/ static void compute_rhs(void) { { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ int i, j, k, m; double aux, rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1; /*-------------------------------------------------------------------- c compute the reciprocal of density, and the kinetic energy, c and the speed of sound. c-------------------------------------------------------------------*/ #pragma omp parallel for private(rho_inv ,aux) for (i = 0; i <= grid_points[0]-1; i++) { #pragma omp parallel for private(rho_inv ,aux) for (j = 0; j <= grid_points[1]-1; j++) { #pragma omp parallel for private(rho_inv ,aux) for (k = 0; k <= grid_points[2]-1; k++) { rho_inv = 1.0/u[0][i][j][k]; rho_i[i][j][k] = rho_inv; us[i][j][k] = u[1][i][j][k] * rho_inv; vs[i][j][k] = u[2][i][j][k] * rho_inv; ws[i][j][k] = u[3][i][j][k] * rho_inv; square[i][j][k] = 0.5* (u[1][i][j][k]*u[1][i][j][k] + u[2][i][j][k]*u[2][i][j][k] + u[3][i][j][k]*u[3][i][j][k] ) * rho_inv; qs[i][j][k] = square[i][j][k] * rho_inv; /*-------------------------------------------------------------------- c (do not need speed and ainx until the lhs computation) c-------------------------------------------------------------------*/ aux = c1c2*rho_inv* (u[4][i][j][k] - square[i][j][k]); aux = sqrt(aux); speed[i][j][k] = aux; ainv[i][j][k] = 1.0/aux; } } } /*-------------------------------------------------------------------- c copy the exact forcing term to the right hand side; because c this forcing term is known, we can store it on the whole grid c including the boundary c-------------------------------------------------------------------*/ #pragma omp parallel for for (m = 0; m < 5; m++) { #pragma omp parallel for for (i = 0; i <= grid_points[0]-1; i++) { #pragma omp parallel for for (j = 0; j <= grid_points[1]-1; j++) { #pragma omp parallel for for (k 
= 0; k <= grid_points[2]-1; k++) { rhs[m][i][j][k] = forcing[m][i][j][k]; } } } } /*-------------------------------------------------------------------- c compute xi-direction fluxes c-------------------------------------------------------------------*/ #pragma omp parallel for private(uijk, up1, um1) for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for private(uijk, up1, um1) for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for private(uijk, up1, um1) for (k = 1; k <= grid_points[2]-2; k++) { uijk = us[i][j][k]; up1 = us[i+1][j][k]; um1 = us[i-1][j][k]; rhs[0][i][j][k] = rhs[0][i][j][k] + dx1tx1 * (u[0][i+1][j][k] - 2.0*u[0][i][j][k] + u[0][i-1][j][k]) - tx2 * (u[1][i+1][j][k] - u[1][i-1][j][k]); rhs[1][i][j][k] = rhs[1][i][j][k] + dx2tx1 * (u[1][i+1][j][k] - 2.0*u[1][i][j][k] + u[1][i-1][j][k]) + xxcon2*con43 * (up1 - 2.0*uijk + um1) - tx2 * (u[1][i+1][j][k]*up1 - u[1][i-1][j][k]*um1 + (u[4][i+1][j][k]- square[i+1][j][k]- u[4][i-1][j][k]+ square[i-1][j][k])* c2); rhs[2][i][j][k] = rhs[2][i][j][k] + dx3tx1 * (u[2][i+1][j][k] - 2.0*u[2][i][j][k] + u[2][i-1][j][k]) + xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] + vs[i-1][j][k]) - tx2 * (u[2][i+1][j][k]*up1 - u[2][i-1][j][k]*um1); rhs[3][i][j][k] = rhs[3][i][j][k] + dx4tx1 * (u[3][i+1][j][k] - 2.0*u[3][i][j][k] + u[3][i-1][j][k]) + xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] + ws[i-1][j][k]) - tx2 * (u[3][i+1][j][k]*up1 - u[3][i-1][j][k]*um1); rhs[4][i][j][k] = rhs[4][i][j][k] + dx5tx1 * (u[4][i+1][j][k] - 2.0*u[4][i][j][k] + u[4][i-1][j][k]) + xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] + qs[i-1][j][k]) + xxcon4 * (up1*up1 - 2.0*uijk*uijk + um1*um1) + xxcon5 * (u[4][i+1][j][k]*rho_i[i+1][j][k] - 2.0*u[4][i][j][k]*rho_i[i][j][k] + u[4][i-1][j][k]*rho_i[i-1][j][k]) - tx2 * ( (c1*u[4][i+1][j][k] - c2*square[i+1][j][k])*up1 - (c1*u[4][i-1][j][k] - c2*square[i-1][j][k])*um1 ); } } } /*-------------------------------------------------------------------- c add fourth order xi-direction dissipation 
c-------------------------------------------------------------------*/ i = 1; #pragma omp parallel for for (m = 0; m < 5; m++) { #pragma omp parallel for for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k]- dssp * ( 5.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] + u[m][i+2][j][k]); } } } i = 2; #pragma omp parallel for for (m = 0; m < 5; m++) { #pragma omp parallel for for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (-4.0*u[m][i-1][j][k] + 6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] + u[m][i+2][j][k]); } } } #pragma omp parallel for for (m = 0; m < 5; m++) { #pragma omp parallel for for (i = 3*1; i <= grid_points[0]-3*1-1; i++) { #pragma omp parallel for for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] + 6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] + u[m][i+2][j][k] ); } } } } i = grid_points[0]-3; #pragma omp parallel for for (m = 0; m < 5; m++) { #pragma omp parallel for for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] + 6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] ); } } } i = grid_points[0]-2; #pragma omp parallel for for (m = 0; m < 5; m++) { #pragma omp parallel for for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] + 5.0*u[m][i][j][k] ); } } } /*-------------------------------------------------------------------- c compute eta-direction fluxes c-------------------------------------------------------------------*/ #pragma omp parallel for private(vijk, vp1, vm1) for (i 
= 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for private(vijk, vp1, vm1) for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for private(vijk, vp1, vm1) for (k = 1; k <= grid_points[2]-2; k++) { vijk = vs[i][j][k]; vp1 = vs[i][j+1][k]; vm1 = vs[i][j-1][k]; rhs[0][i][j][k] = rhs[0][i][j][k] + dy1ty1 * (u[0][i][j+1][k] - 2.0*u[0][i][j][k] + u[0][i][j-1][k]) - ty2 * (u[2][i][j+1][k] - u[2][i][j-1][k]); rhs[1][i][j][k] = rhs[1][i][j][k] + dy2ty1 * (u[1][i][j+1][k] - 2.0*u[1][i][j][k] + u[1][i][j-1][k]) + yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] + us[i][j-1][k]) - ty2 * (u[1][i][j+1][k]*vp1 - u[1][i][j-1][k]*vm1); rhs[2][i][j][k] = rhs[2][i][j][k] + dy3ty1 * (u[2][i][j+1][k] - 2.0*u[2][i][j][k] + u[2][i][j-1][k]) + yycon2*con43 * (vp1 - 2.0*vijk + vm1) - ty2 * (u[2][i][j+1][k]*vp1 - u[2][i][j-1][k]*vm1 + (u[4][i][j+1][k] - square[i][j+1][k] - u[4][i][j-1][k] + square[i][j-1][k]) *c2); rhs[3][i][j][k] = rhs[3][i][j][k] + dy4ty1 * (u[3][i][j+1][k] - 2.0*u[3][i][j][k] + u[3][i][j-1][k]) + yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] + ws[i][j-1][k]) - ty2 * (u[3][i][j+1][k]*vp1 - u[3][i][j-1][k]*vm1); rhs[4][i][j][k] = rhs[4][i][j][k] + dy5ty1 * (u[4][i][j+1][k] - 2.0*u[4][i][j][k] + u[4][i][j-1][k]) + yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] + qs[i][j-1][k]) + yycon4 * (vp1*vp1 - 2.0*vijk*vijk + vm1*vm1) + yycon5 * (u[4][i][j+1][k]*rho_i[i][j+1][k] - 2.0*u[4][i][j][k]*rho_i[i][j][k] + u[4][i][j-1][k]*rho_i[i][j-1][k]) - ty2 * ((c1*u[4][i][j+1][k] - c2*square[i][j+1][k]) * vp1 - (c1*u[4][i][j-1][k] - c2*square[i][j-1][k]) * vm1); } } } /*-------------------------------------------------------------------- c add fourth order eta-direction dissipation c-------------------------------------------------------------------*/ j = 1; #pragma omp parallel for for (m = 0; m < 5; m++) { #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k]- dssp * ( 
5.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] + u[m][i][j+2][k]); } } } j = 2; #pragma omp parallel for for (m = 0; m < 5; m++) { #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (-4.0*u[m][i][j-1][k] + 6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] + u[m][i][j+2][k]); } } } #pragma omp parallel for for (m = 0; m < 5; m++) { #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for for (j = 3*1; j <= grid_points[1]-3*1-1; j++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] + 6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] + u[m][i][j+2][k] ); } } } } j = grid_points[1]-3; #pragma omp parallel for for (m = 0; m < 5; m++) { #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] + 6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] ); } } } j = grid_points[1]-2; #pragma omp parallel for for (m = 0; m < 5; m++) { #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] + 5.0*u[m][i][j][k] ); } } } /*-------------------------------------------------------------------- c compute zeta-direction fluxes c-------------------------------------------------------------------*/ #pragma omp parallel for private(wijk, wp1, wm1) for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for private(wijk, wp1, wm1) for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for private(wijk, wp1, wm1) for (k = 1; k <= grid_points[2]-2; k++) { wijk = ws[i][j][k]; wp1 = ws[i][j][k+1]; wm1 = ws[i][j][k-1]; rhs[0][i][j][k] = 
rhs[0][i][j][k] + dz1tz1 * (u[0][i][j][k+1] - 2.0*u[0][i][j][k] + u[0][i][j][k-1]) - tz2 * (u[3][i][j][k+1] - u[3][i][j][k-1]); rhs[1][i][j][k] = rhs[1][i][j][k] + dz2tz1 * (u[1][i][j][k+1] - 2.0*u[1][i][j][k] + u[1][i][j][k-1]) + zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] + us[i][j][k-1]) - tz2 * (u[1][i][j][k+1]*wp1 - u[1][i][j][k-1]*wm1); rhs[2][i][j][k] = rhs[2][i][j][k] + dz3tz1 * (u[2][i][j][k+1] - 2.0*u[2][i][j][k] + u[2][i][j][k-1]) + zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] + vs[i][j][k-1]) - tz2 * (u[2][i][j][k+1]*wp1 - u[2][i][j][k-1]*wm1); rhs[3][i][j][k] = rhs[3][i][j][k] + dz4tz1 * (u[3][i][j][k+1] - 2.0*u[3][i][j][k] + u[3][i][j][k-1]) + zzcon2*con43 * (wp1 - 2.0*wijk + wm1) - tz2 * (u[3][i][j][k+1]*wp1 - u[3][i][j][k-1]*wm1 + (u[4][i][j][k+1] - square[i][j][k+1] - u[4][i][j][k-1] + square[i][j][k-1]) *c2); rhs[4][i][j][k] = rhs[4][i][j][k] + dz5tz1 * (u[4][i][j][k+1] - 2.0*u[4][i][j][k] + u[4][i][j][k-1]) + zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] + qs[i][j][k-1]) + zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + wm1*wm1) + zzcon5 * (u[4][i][j][k+1]*rho_i[i][j][k+1] - 2.0*u[4][i][j][k]*rho_i[i][j][k] + u[4][i][j][k-1]*rho_i[i][j][k-1]) - tz2 * ( (c1*u[4][i][j][k+1] - c2*square[i][j][k+1])*wp1 - (c1*u[4][i][j][k-1] - c2*square[i][j][k-1])*wm1); } } } /*-------------------------------------------------------------------- c add fourth order zeta-direction dissipation c-------------------------------------------------------------------*/ k = 1; #pragma omp parallel for for (m = 0; m < 5; m++) { #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for for (j = 1; j <= grid_points[1]-2; j++) { rhs[m][i][j][k] = rhs[m][i][j][k]- dssp * ( 5.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] + u[m][i][j][k+2]); } } } k = 2; #pragma omp parallel for for (m = 0; m < 5; m++) { #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for for (j = 1; j <= grid_points[1]-2; j++) { rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * 
(-4.0*u[m][i][j][k-1] + 6.0*u[m][i][j][k] -
                      4.0*u[m][i][j][k+1] + u[m][i][j][k+2]);
      }
    }
  }

/*--------------------------------------------------------------------
c interior k planes: full five-point fourth-order dissipation stencil
c-------------------------------------------------------------------*/
#pragma omp parallel for
  for (m = 0; m < 5; m++) {
#pragma omp parallel for
    for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for
      for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for
        for (k = 3*1; k <= grid_points[2]-3*1-1; k++) {
          rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
            ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +
              6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] +
              u[m][i][j][k+2] );
        }
      }
    }
  }

/* k = grid_points[2]-3: one-sided stencil (no k+2 neighbour available) */
  k = grid_points[2]-3;
#pragma omp parallel for
  for (m = 0; m < 5; m++) {
#pragma omp parallel for
    for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for
      for (j = 1; j <= grid_points[1]-2; j++) {
        rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
          ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +
            6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] );
      }
    }
  }

/* k = grid_points[2]-2: boundary-adjacent stencil */
  k = grid_points[2]-2;
#pragma omp parallel for
  for (m = 0; m < 5; m++) {
#pragma omp parallel for
    for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for
      for (j = 1; j <= grid_points[1]-2; j++) {
        rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
          ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +
            5.0*u[m][i][j][k] );
      }
    }
  }

/* finally scale the whole right-hand side by the time step */
#pragma omp parallel for
  for (m = 0; m < 5; m++) {
#pragma omp parallel for
    for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for
      for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for
        for (k = 1; k <= grid_points[2]-2; k++) {
          rhs[m][i][j][k] = rhs[m][i][j][k] * dt;
        }
      }
    }
  }
}
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void set_constants(void) {
/*--------------------------------------------------------------------
c initialize every file-scope constant used by the solver: the ce
c coefficient table (presumably the exact-solution polynomial
c coefficients -- confirm against the routine that reads ce), the gas
c constants c1..c5, mesh spacings, dissipation factors, and all the
c precomputed products used in the flux/dissipation kernels.
c-------------------------------------------------------------------*/
  ce[0][0] = 2.0;
  ce[1][0] = 0.0;
  ce[2][0] = 0.0;
  ce[3][0] = 4.0;
  ce[4][0] = 5.0;
  ce[5][0] = 3.0;
  ce[6][0] = 0.5;
  ce[7][0] = 0.02;
  ce[8][0] = 0.01;
  ce[9][0] = 0.03;
  ce[10][0] = 0.5;
  ce[11][0] = 0.4;
  ce[12][0] = 0.3;

  ce[0][1] = 1.0;
  ce[1][1] = 0.0;
  ce[2][1] = 0.0;
  ce[3][1] = 0.0;
  ce[4][1] = 1.0;
  ce[5][1] = 2.0;
  ce[6][1] = 3.0;
  ce[7][1] = 0.01;
  ce[8][1] = 0.03;
  ce[9][1] = 0.02;
  ce[10][1] = 0.4;
  ce[11][1] = 0.3;
  ce[12][1] = 0.5;

  ce[0][2] = 2.0;
  ce[1][2] = 2.0;
  ce[2][2] = 0.0;
  ce[3][2] = 0.0;
  ce[4][2] = 0.0;
  ce[5][2] = 2.0;
  ce[6][2] = 3.0;
  ce[7][2] = 0.04;
  ce[8][2] = 0.03;
  ce[9][2] = 0.05;
  ce[10][2] = 0.3;
  ce[11][2] = 0.5;
  ce[12][2] = 0.4;

  ce[0][3] = 2.0;
  ce[1][3] = 2.0;
  ce[2][3] = 0.0;
  ce[3][3] = 0.0;
  ce[4][3] = 0.0;
  ce[5][3] = 2.0;
  ce[6][3] = 3.0;
  ce[7][3] = 0.03;
  ce[8][3] = 0.05;
  ce[9][3] = 0.04;
  ce[10][3] = 0.2;
  ce[11][3] = 0.1;
  ce[12][3] = 0.3;

  ce[0][4] = 5.0;
  ce[1][4] = 4.0;
  ce[2][4] = 3.0;
  ce[3][4] = 2.0;
  ce[4][4] = 0.1;
  ce[5][4] = 0.4;
  ce[6][4] = 0.3;
  ce[7][4] = 0.05;
  ce[8][4] = 0.04;
  ce[9][4] = 0.03;
  ce[10][4] = 0.1;
  ce[11][4] = 0.3;
  ce[12][4] = 0.2;

/* gas constants (c1 = gamma, c2 = gamma-1 for an ideal gas -- TODO confirm) */
  c1 = 1.4;
  c2 = 0.4;
  c3 = 0.1;
  c4 = 1.0;
  c5 = 1.4;

  bt = sqrt(0.5);

/* reciprocal mesh widths in each direction */
  dnxm1 = 1.0 / (double)(grid_points[0]-1);
  dnym1 = 1.0 / (double)(grid_points[1]-1);
  dnzm1 = 1.0 / (double)(grid_points[2]-1);

  c1c2 = c1 * c2;
  c1c5 = c1 * c5;
  c3c4 = c3 * c4;
  c1345 = c1c5 * c3c4;

  conz1 = (1.0-c1c5);

/* tx1=1/dx^2, tx2=1/(2 dx), tx3=1/dx (and analogues for y, z) */
  tx1 = 1.0 / (dnxm1 * dnxm1);
  tx2 = 1.0 / (2.0 * dnxm1);
  tx3 = 1.0 / dnxm1;

  ty1 = 1.0 / (dnym1 * dnym1);
  ty2 = 1.0 / (2.0 * dnym1);
  ty3 = 1.0 / dnym1;

  tz1 = 1.0 / (dnzm1 * dnzm1);
  tz2 = 1.0 / (2.0 * dnzm1);
  tz3 = 1.0 / dnzm1;

/* per-equation diffusion coefficients */
  dx1 = 0.75;
  dx2 = 0.75;
  dx3 = 0.75;
  dx4 = 0.75;
  dx5 = 0.75;

  dy1 = 0.75;
  dy2 = 0.75;
  dy3 = 0.75;
  dy4 = 0.75;
  dy5 = 0.75;

  dz1 = 1.0;
  dz2 = 1.0;
  dz3 = 1.0;
  dz4 = 1.0;
  dz5 = 1.0;

  dxmax = max(dx3, dx4);
  dymax = max(dy2, dy4);
  dzmax = max(dz2, dz3);

/* artificial dissipation coefficient */
  dssp = 0.25 * max(dx1, max(dy1, dz1) );
  c4dssp = 4.0 * dssp;
  c5dssp = 5.0 * dssp;

/* dt-scaled variants used by the implicit sweeps */
  dttx1 = dt*tx1;
  dttx2 = dt*tx2;
  dtty1 = dt*ty1;
  dtty2 = dt*ty2;
  dttz1 = dt*tz1;
  dttz2 = dt*tz2;

  c2dttx1 = 2.0*dttx1;
  c2dtty1 = 2.0*dtty1;
  c2dttz1 = 2.0*dttz1;

  dtdssp = dt*dssp;

  comz1 = dtdssp;
  comz4 = 4.0*dtdssp;
  comz5 = 5.0*dtdssp;
  comz6 = 6.0*dtdssp;

/* NOTE: this assignment is completed on the following source line */
  c3c4tx3 = c3c4*tx3;
  c3c4ty3 = c3c4*ty3;
  c3c4tz3 =
c3c4*tz3; dx1tx1 = dx1*tx1; dx2tx1 = dx2*tx1; dx3tx1 = dx3*tx1; dx4tx1 = dx4*tx1; dx5tx1 = dx5*tx1; dy1ty1 = dy1*ty1; dy2ty1 = dy2*ty1; dy3ty1 = dy3*ty1; dy4ty1 = dy4*ty1; dy5ty1 = dy5*ty1; dz1tz1 = dz1*tz1; dz2tz1 = dz2*tz1; dz3tz1 = dz3*tz1; dz4tz1 = dz4*tz1; dz5tz1 = dz5*tz1; c2iv = 2.5; con43 = 4.0/3.0; con16 = 1.0/6.0; xxcon1 = c3c4tx3*con43*tx3; xxcon2 = c3c4tx3*tx3; xxcon3 = c3c4tx3*conz1*tx3; xxcon4 = c3c4tx3*con16*tx3; xxcon5 = c3c4tx3*c1c5*tx3; yycon1 = c3c4ty3*con43*ty3; yycon2 = c3c4ty3*ty3; yycon3 = c3c4ty3*conz1*ty3; yycon4 = c3c4ty3*con16*ty3; yycon5 = c3c4ty3*c1c5*ty3; zzcon1 = c3c4tz3*con43*tz3; zzcon2 = c3c4tz3*tz3; zzcon3 = c3c4tz3*conz1*tz3; zzcon4 = c3c4tz3*con16*tz3; zzcon5 = c3c4tz3*c1c5*tz3; } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void txinvr(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c block-diagonal matrix-vector multiplication --------------------------------------------------------------------*/ int i, j, k; double t1, t2, t3, ac, ru1, uu, vv, ww, r1, r2, r3, r4, r5, ac2inv; #pragma omp parallel for private(ru1, uu, vv, ww, ac, ac2inv, r1 ,r2 ,r3 ,r4 ,t1 ,t2 ,t3) for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for private(ru1, uu, vv, ww, ac, ac2inv, r1 ,r2 ,r3 ,r4 ,t1 ,t2 ,t3) for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for private(ru1, uu, vv, ww, ac, ac2inv, r1 ,r2 ,r3 ,r4 ,t1 ,t2 ,t3) for (k = 1; k <= grid_points[2]-2; k++) { ru1 = rho_i[i][j][k]; uu = us[i][j][k]; vv = vs[i][j][k]; ww = ws[i][j][k]; ac = speed[i][j][k]; ac2inv = ainv[i][j][k]*ainv[i][j][k]; r1 = rhs[0][i][j][k]; r2 = rhs[1][i][j][k]; r3 = rhs[2][i][j][k]; r4 = rhs[3][i][j][k]; r5 = rhs[4][i][j][k]; t1 = c2 * ac2inv * ( qs[i][j][k]*r1 - uu*r2 - vv*r3 
- ww*r4 + r5 ); t2 = bt * ru1 * ( uu * r1 - r2 ); t3 = ( bt * ru1 * ac ) * t1; rhs[0][i][j][k] = r1 - t1; rhs[1][i][j][k] = - ru1 * ( ww*r1 - r4 ); rhs[2][i][j][k] = ru1 * ( vv*r1 - r3 ); rhs[3][i][j][k] = - t2 + t3; rhs[4][i][j][k] = t2 + t3; } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void tzetar(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c block-diagonal matrix-vector multiplication c-------------------------------------------------------------------*/ int i, j, k; double t1, t2, t3, ac, xvel, yvel, zvel, r1, r2, r3, r4, r5, btuz, acinv, ac2u, uzik1; #pragma omp for private(t1 ,t2 ,t3 ,ac ,xvel ,yvel ,zvel ,r1 ,r2 ,r3 ,r4 ,r5 ,btuz ,ac2u ,uzik1 ) for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for private(t1 ,t2 ,t3 ,ac ,xvel ,yvel ,zvel ,r1 ,r2 ,r3 ,r4 ,r5 ,btuz ,ac2u ,uzik1 ) for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for private(t1 ,t2 ,t3 ,ac ,xvel ,yvel ,zvel ,r1 ,r2 ,r3 ,r4 ,r5 ,btuz ,ac2u ,uzik1 ) for (k = 1; k <= grid_points[2]-2; k++) { xvel = us[i][j][k]; yvel = vs[i][j][k]; zvel = ws[i][j][k]; ac = speed[i][j][k]; acinv = ainv[i][j][k]; ac2u = ac*ac; r1 = rhs[0][i][j][k]; r2 = rhs[1][i][j][k]; r3 = rhs[2][i][j][k]; r4 = rhs[3][i][j][k]; r5 = rhs[4][i][j][k]; uzik1 = u[0][i][j][k]; btuz = bt * uzik1; t1 = btuz*acinv * (r4 + r5); t2 = r3 + t1; t3 = btuz * (r4 - r5); rhs[0][i][j][k] = t2; rhs[1][i][j][k] = -uzik1*r2 + xvel*t2; rhs[2][i][j][k] = uzik1*r1 + yvel*t2; rhs[3][i][j][k] = zvel*t2 + t3; rhs[4][i][j][k] = uzik1*(-xvel*r2 + yvel*r1) + qs[i][j][k]*t2 + c2iv*ac2u*t1 + zvel*t3; } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ 
static void verify(int no_time_steps, char *class, boolean *verified) {
/*--------------------------------------------------------------------
c verification routine: compares the residual and solution-error RMS
c norms against hard-coded reference values for the standard problem
c classes (S/W/A/B/C) and reports success or failure.
c
c   no_time_steps  number of time steps actually run
c   class          out: problem class letter, 'U' if unrecognized
c   verified       out: TRUE iff all norms match within epsilon
c-------------------------------------------------------------------*/
  double xcrref[5],xceref[5],xcrdif[5],xcedif[5],
    epsilon, xce[5], xcr[5], dtref;
  int m;

/*--------------------------------------------------------------------
c tolerance level
c-------------------------------------------------------------------*/
  epsilon = 1.0e-08;

/*--------------------------------------------------------------------
c compute the error norm and the residual norm
c-------------------------------------------------------------------*/
  error_norm(xce);
  compute_rhs();
  rhs_norm(xcr);

/* residual norms are reported per unit time step */
#pragma omp parallel for
  for (m = 0; m < 5; m++) {
    xcr[m] = xcr[m] / dt;
  }

/* default: unknown class, placeholder references of 1.0 */
  *class = 'U';
  *verified = TRUE;
#pragma omp parallel for
  for (m = 0; m < 5; m++) {
    xcrref[m] = 1.0;
    xceref[m] = 1.0;
  }

/*--------------------------------------------------------------------
c reference data for 12X12X12 grids after 100 time steps, with DT = 1.50d-02
c-------------------------------------------------------------------*/
  if ( grid_points[0] == 12
       && grid_points[1] == 12
       && grid_points[2] == 12
       && no_time_steps == 100) {

    *class = 'S';
    dtref = 1.5e-2;

/* reference RMS-norms of residual */
    xcrref[0] = 2.7470315451339479e-02;
    xcrref[1] = 1.0360746705285417e-02;
    xcrref[2] = 1.6235745065095532e-02;
    xcrref[3] = 1.5840557224455615e-02;
    xcrref[4] = 3.4849040609362460e-02;

/* reference RMS-norms of solution error */
    xceref[0] = 2.7289258557377227e-05;
    xceref[1] = 1.0364446640837285e-05;
    xceref[2] = 1.6154798287166471e-05;
    xceref[3] = 1.5750704994480102e-05;
    xceref[4] = 3.4177666183390531e-05;

/*--------------------------------------------------------------------
c reference data for 36X36X36 grids after 400 time steps, with DT = 1.5d-03
c-------------------------------------------------------------------*/
  } else if (grid_points[0] == 36
	     && grid_points[1] == 36
	     && grid_points[2] == 36
	     && no_time_steps == 400) {

    *class = 'W';
    dtref = 1.5e-3;

/* reference RMS-norms of residual */
    xcrref[0] = 0.1893253733584e-02;
    xcrref[1] = 0.1717075447775e-03;
    xcrref[2] = 0.2778153350936e-03;
    xcrref[3] = 0.2887475409984e-03;
    xcrref[4] = 0.3143611161242e-02;

/* reference RMS-norms of solution error */
    xceref[0] = 0.7542088599534e-04;
    xceref[1] = 0.6512852253086e-05;
    xceref[2] = 0.1049092285688e-04;
    xceref[3] = 0.1128838671535e-04;
    xceref[4] = 0.1212845639773e-03;

/*--------------------------------------------------------------------
c reference data for 64X64X64 grids after 400 time steps, with DT = 1.5d-03
c-------------------------------------------------------------------*/
  } else if (grid_points[0] == 64
	     && grid_points[1] == 64
	     && grid_points[2] == 64
	     && no_time_steps == 400 ) {

    *class = 'A';
    dtref = 1.5e-3;

/* reference RMS-norms of residual */
    xcrref[0] = 2.4799822399300195;
    xcrref[1] = 1.1276337964368832;
    xcrref[2] = 1.5028977888770491;
    xcrref[3] = 1.4217816211695179;
    xcrref[4] = 2.1292113035138280;

/* reference RMS-norms of solution error */
    xceref[0] = 1.0900140297820550e-04;
    xceref[1] = 3.7343951769282091e-05;
    xceref[2] = 5.0092785406541633e-05;
    xceref[3] = 4.7671093939528255e-05;
    xceref[4] = 1.3621613399213001e-04;

/*--------------------------------------------------------------------
c reference data for 102X102X102 grids after 400 time steps,
c with DT = 1.0d-03
c-------------------------------------------------------------------*/
  } else if (grid_points[0] == 102
	     && grid_points[1] == 102
	     && grid_points[2] == 102
	     && no_time_steps == 400) {

    *class = 'B';
    dtref = 1.0e-3;

/* reference RMS-norms of residual */
    xcrref[0] = 0.6903293579998e+02;
    xcrref[1] = 0.3095134488084e+02;
    xcrref[2] = 0.4103336647017e+02;
    xcrref[3] = 0.3864769009604e+02;
    xcrref[4] = 0.5643482272596e+02;

/* reference RMS-norms of solution error */
    xceref[0] = 0.9810006190188e-02;
    xceref[1] = 0.1022827905670e-02;
    xceref[2] = 0.1720597911692e-02;
    xceref[3] = 0.1694479428231e-02;
    xceref[4] = 0.1847456263981e-01;

/*--------------------------------------------------------------------
c reference data for 162X162X162 grids after 400 time steps,
c with DT = 0.67d-03
c-------------------------------------------------------------------*/
  } else if (grid_points[0] == 162
	     && grid_points[1] == 162
	     && grid_points[2] == 162
	     && no_time_steps == 400) {

    *class = 'C';
    dtref = 0.67e-3;

/* reference RMS-norms of residual */
    xcrref[0] = 0.5881691581829e+03;
    xcrref[1] = 0.2454417603569e+03;
    xcrref[2] = 0.3293829191851e+03;
    xcrref[3] = 0.3081924971891e+03;
    xcrref[4] = 0.4597223799176e+03;

/* reference RMS-norms of solution error */
    xceref[0] = 0.2598120500183e+00;
    xceref[1] = 0.2590888922315e-01;
    xceref[2] = 0.5132886416320e-01;
    xceref[3] = 0.4806073419454e-01;
    xceref[4] = 0.5483377491301e+00;

  } else {
    *verified = FALSE;
  }

/*--------------------------------------------------------------------
c compute the relative difference of solution values from the known
c reference values
c-------------------------------------------------------------------*/
#pragma omp parallel for
  for (m = 0; m < 5; m++) {
    xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]) ;
    xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);
  }

/*--------------------------------------------------------------------
c output the comparison of computed results to known cases
c-------------------------------------------------------------------*/
  if (*class != 'U') {
    printf(" Verification being performed for class %1c\n", *class);
    printf(" accuracy setting for epsilon = %20.13e\n", epsilon);
    /* a DT mismatch invalidates the reference data entirely */
    if (fabs(dt-dtref) > epsilon) {
      *verified = FALSE;
      *class = 'U';
      printf(" DT does not match the reference value of %15.8e\n", dtref);
    }
  } else {
    printf(" Unknown class\n");
  }

  if (*class != 'U') {
    printf(" Comparison of RMS-norms of residual\n");
  } else {
    printf(" RMS-norms of residual\n");
  }

  for (m = 0; m < 5; m++) {
    if (*class == 'U') {
      printf("          %2d%20.13e\n", m, xcr[m]);
    } else if (xcrdif[m] > epsilon) {
      *verified = FALSE;
      printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
	     m,xcr[m],xcrref[m],xcrdif[m]);
    } else {
      printf("          %2d%20.13e%20.13e%20.13e\n",
	     m,xcr[m],xcrref[m],xcrdif[m]);
    }
  }

  if (*class != 'U') {
    printf(" Comparison of RMS-norms of solution error\n");
  } else {
    printf(" RMS-norms of solution error\n");
  }

  for (m = 0; m < 5; m++) {
    if (*class == 'U') {
      printf("          %2d%20.13e\n", m, xce[m]);
    } else if (xcedif[m] > epsilon) {
      *verified = FALSE;
      printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
	     m,xce[m],xceref[m],xcedif[m]);
    } else {
      printf("          %2d%20.13e%20.13e%20.13e\n",
	     m,xce[m],xceref[m],xcedif[m]);
    }
  }

  if (*class == 'U') {
    printf(" No reference values provided\n");
    printf(" No verification performed\n");
  } else if (*verified) {
    printf(" Verification Successful\n");
  } else {
    printf(" Verification failed\n");
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_solve(void) {
{
/*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c this function performs the solution of the approximate factorization c step in the x-direction for all five matrix components c simultaneously. The Thomas algorithm is employed to solve the c systems for the x-lines. Boundary conditions are non-periodic --------------------------------------------------------------------*/ int i, j, k, n, i1, i2, m; double fac1, fac2; /*-------------------------------------------------------------------- c FORWARD ELIMINATION --------------------------------------------------------------------*/ lhsx(); /*-------------------------------------------------------------------- c perform the Thomas algorithm; first, FORWARD ELIMINATION --------------------------------------------------------------------*/ n = 0; for (i = 0; i <= grid_points[0]-3; i++) { i1 = i + 1; i2 = i + 2; #pragma omp parallel for private(fac1, m) for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for private(fac1, m) for (k = 1; k <= grid_points[2]-2; k++) { fac1 = 1./lhs[n+2][i][j][k]; lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k]; lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j][k] = fac1*rhs[m][i][j][k]; } lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] - lhs[n+1][i1][j][k]*lhs[n+3][i][j][k]; lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] - lhs[n+1][i1][j][k]*lhs[n+4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i1][j][k] = rhs[m][i1][j][k] - lhs[n+1][i1][j][k]*rhs[m][i][j][k]; } lhs[n+1][i2][j][k] = lhs[n+1][i2][j][k] - lhs[n+0][i2][j][k]*lhs[n+3][i][j][k]; lhs[n+2][i2][j][k] = lhs[n+2][i2][j][k] - lhs[n+0][i2][j][k]*lhs[n+4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i2][j][k] = rhs[m][i2][j][k] - lhs[n+0][i2][j][k]*rhs[m][i][j][k]; } } } } /*-------------------------------------------------------------------- c The last 
two rows in this grid block are a bit different, c since they do not have two more rows available for the c elimination of off-diagonal entries --------------------------------------------------------------------*/ i = grid_points[0]-2; i1 = grid_points[0]-1; #pragma omp parallel for private(m, fac1, fac2) for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for private(m, fac1, fac2) for (k = 1; k <= grid_points[2]-2; k++) { fac1 = 1.0/lhs[n+2][i][j][k]; lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k]; lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j][k] = fac1*rhs[m][i][j][k]; } lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] - lhs[n+1][i1][j][k]*lhs[n+3][i][j][k]; lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] - lhs[n+1][i1][j][k]*lhs[n+4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i1][j][k] = rhs[m][i1][j][k] - lhs[n+1][i1][j][k]*rhs[m][i][j][k]; } /*-------------------------------------------------------------------- c scale the last row immediately --------------------------------------------------------------------*/ fac2 = 1./lhs[n+2][i1][j][k]; for (m = 0; m < 3; m++) { rhs[m][i1][j][k] = fac2*rhs[m][i1][j][k]; } } } /*-------------------------------------------------------------------- c do the u+c and the u-c factors --------------------------------------------------------------------*/ #pragma omp parallel for private(n, i1, i2, fac1) for (m = 3; m < 5; m++) { n = (m-3+1)*5; for (i = 0; i <= grid_points[0]-3; i++) { i1 = i + 1; i2 = i + 2; #pragma omp parallel for private(n, i1, i2, fac1) for (j = 1; j <= grid_points[1]-2; j++) { for (k = 1; k <= grid_points[2]-2; k++) { fac1 = 1./lhs[n+2][i][j][k]; lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k]; lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k]; rhs[m][i][j][k] = fac1*rhs[m][i][j][k]; lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] - lhs[n+1][i1][j][k]*lhs[n+3][i][j][k]; lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] - lhs[n+1][i1][j][k]*lhs[n+4][i][j][k]; rhs[m][i1][j][k] = rhs[m][i1][j][k] - 
lhs[n+1][i1][j][k]*rhs[m][i][j][k]; lhs[n+1][i2][j][k] = lhs[n+1][i2][j][k] - lhs[n+0][i2][j][k]*lhs[n+3][i][j][k]; lhs[n+2][i2][j][k] = lhs[n+2][i2][j][k] - lhs[n+0][i2][j][k]*lhs[n+4][i][j][k]; rhs[m][i2][j][k] = rhs[m][i2][j][k] - lhs[n+0][i2][j][k]*rhs[m][i][j][k]; } } } /*-------------------------------------------------------------------- c And again the last two rows separately --------------------------------------------------------------------*/ i = grid_points[0]-2; i1 = grid_points[0]-1; #pragma omp parallel for private(fac1, fac2) for (j = 1; j <= grid_points[1]-2; j++) { for (k = 1; k <= grid_points[2]-2; k++) { fac1 = 1./lhs[n+2][i][j][k]; lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k]; lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k]; rhs[m][i][j][k] = fac1*rhs[m][i][j][k]; lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] - lhs[n+1][i1][j][k]*lhs[n+3][i][j][k]; lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] - lhs[n+1][i1][j][k]*lhs[n+4][i][j][k]; rhs[m][i1][j][k] = rhs[m][i1][j][k] - lhs[n+1][i1][j][k]*rhs[m][i][j][k]; /*-------------------------------------------------------------------- c Scale the last row immediately --------------------------------------------------------------------*/ fac2 = 1./lhs[n+2][i1][j][k]; rhs[m][i1][j][k] = fac2*rhs[m][i1][j][k]; } } } /*-------------------------------------------------------------------- c BACKSUBSTITUTION --------------------------------------------------------------------*/ i = grid_points[0]-2; i1 = grid_points[0]-1; n = 0; for (m = 0; m < 3; m++) { #pragma omp parallel for for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n+3][i][j][k]*rhs[m][i1][j][k]; } } } #pragma omp parallel for private(n) for (m = 3; m < 5; m++) { #pragma omp parallel for private(n) for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for private(n) for (k = 1; k <= grid_points[2]-2; k++) { n = (m-3+1)*5; rhs[m][i][j][k] = rhs[m][i][j][k] - 
lhs[n+3][i][j][k]*rhs[m][i1][j][k]; } } } /*-------------------------------------------------------------------- c The first three factors --------------------------------------------------------------------*/ n = 0; for (i = grid_points[0]-3; i >= 0; i--) { i1 = i + 1; i2 = i + 2; #pragma omp parallel for for (m = 0; m < 3; m++) { #pragma omp parallel for for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n+3][i][j][k]*rhs[m][i1][j][k] - lhs[n+4][i][j][k]*rhs[m][i2][j][k]; } } } } /*-------------------------------------------------------------------- c And the remaining two --------------------------------------------------------------------*/ #pragma omp parallel for private(n, i1, i2) for (m = 3; m < 5; m++) { n = (m-3+1)*5; for (i = grid_points[0]-3; i >= 0; i--) { i1 = i + 1; i2 = i + 2; #pragma omp parallel for for (j = 1; j <= grid_points[1]-2; j++) { for (k = 1; k <= grid_points[2]-2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n+3][i][j][k]*rhs[m][i1][j][k] - lhs[n+4][i][j][k]*rhs[m][i2][j][k]; } } } } } /*-------------------------------------------------------------------- c Do the block-diagonal inversion --------------------------------------------------------------------*/ ninvr(); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void y_solve(void) { { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c this function performs the solution of the approximate factorization c step in the y-direction for all five matrix components c simultaneously. The Thomas algorithm is employed to solve the c systems for the y-lines. 
Boundary conditions are non-periodic --------------------------------------------------------------------*/ int i, j, k, n, j1, j2, m; double fac1, fac2; /*-------------------------------------------------------------------- c FORWARD ELIMINATION --------------------------------------------------------------------*/ lhsy(); n = 0; for (j = 0; j <= grid_points[1]-3; j++) { j1 = j + 1; j2 = j + 2; #pragma omp parallel for private(fac1) for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for private(fac1) for (k = 1; k <= grid_points[2]-2; k++) { fac1 = 1./lhs[n+2][i][j][k]; lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k]; lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j][k] = fac1*rhs[m][i][j][k]; } lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] - lhs[n+1][i][j1][k]*lhs[n+3][i][j][k]; lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] - lhs[n+1][i][j1][k]*lhs[n+4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j1][k] = rhs[m][i][j1][k] - lhs[n+1][i][j1][k]*rhs[m][i][j][k]; } lhs[n+1][i][j2][k] = lhs[n+1][i][j2][k] - lhs[n+0][i][j2][k]*lhs[n+3][i][j][k]; lhs[n+2][i][j2][k] = lhs[n+2][i][j2][k] - lhs[n+0][i][j2][k]*lhs[n+4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j2][k] = rhs[m][i][j2][k] - lhs[n+0][i][j2][k]*rhs[m][i][j][k]; } } } } /*-------------------------------------------------------------------- c The last two rows in this grid block are a bit different, c since they do not have two more rows available for the c elimination of off-diagonal entries --------------------------------------------------------------------*/ j = grid_points[1]-2; j1 = grid_points[1]-1; #pragma omp parallel for private(fac1, fac2) for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for private(fac1, fac2) for (k = 1; k <= grid_points[2]-2; k++) { fac1 = 1./lhs[n+2][i][j][k]; lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k]; lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j][k] = fac1*rhs[m][i][j][k]; } lhs[n+2][i][j1][k] = 
lhs[n+2][i][j1][k] - lhs[n+1][i][j1][k]*lhs[n+3][i][j][k]; lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] - lhs[n+1][i][j1][k]*lhs[n+4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j1][k] = rhs[m][i][j1][k] - lhs[n+1][i][j1][k]*rhs[m][i][j][k]; } /*-------------------------------------------------------------------- c scale the last row immediately --------------------------------------------------------------------*/ fac2 = 1./lhs[n+2][i][j1][k]; for (m = 0; m < 3; m++) { rhs[m][i][j1][k] = fac2*rhs[m][i][j1][k]; } } } /*-------------------------------------------------------------------- c do the u+c and the u-c factors --------------------------------------------------------------------*/ #pragma omp parallel for private(n, j1, j2, fac1) for (m = 3; m < 5; m++) { n = (m-3+1)*5; for (j = 0; j <= grid_points[1]-3; j++) { j1 = j + 1; j2 = j + 2; #pragma omp parallel for private(n, j1, j2, fac1) for (i = 1; i <= grid_points[0]-2; i++) { for (k = 1; k <= grid_points[2]-2; k++) { fac1 = 1./lhs[n+2][i][j][k]; lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k]; lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k]; rhs[m][i][j][k] = fac1*rhs[m][i][j][k]; lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] - lhs[n+1][i][j1][k]*lhs[n+3][i][j][k]; lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] - lhs[n+1][i][j1][k]*lhs[n+4][i][j][k]; rhs[m][i][j1][k] = rhs[m][i][j1][k] - lhs[n+1][i][j1][k]*rhs[m][i][j][k]; lhs[n+1][i][j2][k] = lhs[n+1][i][j2][k] - lhs[n+0][i][j2][k]*lhs[n+3][i][j][k]; lhs[n+2][i][j2][k] = lhs[n+2][i][j2][k] - lhs[n+0][i][j2][k]*lhs[n+4][i][j][k]; rhs[m][i][j2][k] = rhs[m][i][j2][k] - lhs[n+0][i][j2][k]*rhs[m][i][j][k]; } } } /*-------------------------------------------------------------------- c And again the last two rows separately --------------------------------------------------------------------*/ j = grid_points[1]-2; j1 = grid_points[1]-1; #pragma omp parallel for private(fac1, fac2) for (i = 1; i <= grid_points[0]-2; i++) { for (k = 1; k <= grid_points[2]-2; k++) { fac1 = 
1./lhs[n+2][i][j][k]; lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k]; lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k]; rhs[m][i][j][k] = fac1*rhs[m][i][j][k]; lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] - lhs[n+1][i][j1][k]*lhs[n+3][i][j][k]; lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] - lhs[n+1][i][j1][k]*lhs[n+4][i][j][k]; rhs[m][i][j1][k] = rhs[m][i][j1][k] - lhs[n+1][i][j1][k]*rhs[m][i][j][k]; /*-------------------------------------------------------------------- c Scale the last row immediately --------------------------------------------------------------------*/ fac2 = 1./lhs[n+2][i][j1][k]; rhs[m][i][j1][k] = fac2*rhs[m][i][j1][k]; } } } /*-------------------------------------------------------------------- c BACKSUBSTITUTION --------------------------------------------------------------------*/ j = grid_points[1]-2; j1 = grid_points[1]-1; n = 0; for (m = 0; m < 3; m++) { #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for for (k = 1; k <= grid_points[2]-2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n+3][i][j][k]*rhs[m][i][j1][k]; } } } #pragma omp parallel for private(n) for (m = 3; m < 5; m++) { #pragma omp parallel for private(n) for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for private(n) for (k = 1; k <= grid_points[2]-2; k++) { n = (m-3+1)*5; rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n+3][i][j][k]*rhs[m][i][j1][k]; } } } /*-------------------------------------------------------------------- c The first three factors --------------------------------------------------------------------*/ n = 0; #pragma omp parallel for private(j1, j2) for (m = 0; m < 3; m++) { for (j = grid_points[1]-3; j >= 0; j--) { j1 = j + 1; j2 = j + 2; #pragma omp parallel for private(j1, j2) for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for private(j1, j2) for (k = 1; k <= grid_points[2]-2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n+3][i][j][k]*rhs[m][i][j1][k] - lhs[n+4][i][j][k]*rhs[m][i][j2][k]; } } } } 
/*-------------------------------------------------------------------- c And the remaining two --------------------------------------------------------------------*/ #pragma omp parallel for private(n, j1, j2) for (m = 3; m < 5; m++) { n = (m-3+1)*5; for (j = grid_points[1]-3; j >= 0; j--) { j1 = j + 1; j2 = j1 + 1; #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { for (k = 1; k <= grid_points[2]-2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n+3][i][j][k]*rhs[m][i][j1][k] - lhs[n+4][i][j][k]*rhs[m][i][j2][k]; } } } } } pinvr(); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void z_solve(void) { { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c this function performs the solution of the approximate factorization c step in the z-direction for all five matrix components c simultaneously. The Thomas algorithm is employed to solve the c systems for the z-lines. 
Boundary conditions are non-periodic
c-------------------------------------------------------------------*/

  /* Thomas-algorithm solve along z-lines: one forward-elimination sweep in
     k followed by back-substitution.  lhs[n+0..n+4] hold the coefficients
     of one tridiagonal factor; n = 0 is the factor shared by the first
     three solution components, n = (m-3+1)*5 selects the u+c / u-c
     factors.
     NOTE(review): the inner `#pragma omp parallel for` directives nest
     inside an already-parallel outer loop; unless nested parallelism is
     enabled they run with single-thread teams, so effectively only the
     outermost parallel loop is distributed -- confirm this is intended. */
  int i, j, k, n, k1, k2, m;
  double fac1, fac2;

/*--------------------------------------------------------------------
c                          FORWARD ELIMINATION
c-------------------------------------------------------------------*/

  /* Build the z-direction lhs coefficients. */
  lhsz();

  /* Sweep for the factor shared by components m = 0..2 (n == 0). */
  n = 0;
#pragma omp parallel for private(k1, k2, fac1)
  for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for private(k1, k2, fac1)
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (k = 0; k <= grid_points[2]-3; k++) {
        k1 = k + 1;
        k2 = k + 2;
        /* Scale row k so that its diagonal becomes 1. */
        fac1 = 1./lhs[n+2][i][j][k];
        lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
        lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
        for (m = 0; m < 3; m++) {
          rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
        }
        /* Eliminate the first sub-diagonal entry of row k+1. */
        lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -
          lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];
        lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -
          lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];
        for (m = 0; m < 3; m++) {
          rhs[m][i][j][k1] = rhs[m][i][j][k1] -
            lhs[n+1][i][j][k1]*rhs[m][i][j][k];
        }
        /* Eliminate the second sub-diagonal entry of row k+2. */
        lhs[n+1][i][j][k2] = lhs[n+1][i][j][k2] -
          lhs[n+0][i][j][k2]*lhs[n+3][i][j][k];
        lhs[n+2][i][j][k2] = lhs[n+2][i][j][k2] -
          lhs[n+0][i][j][k2]*lhs[n+4][i][j][k];
        for (m = 0; m < 3; m++) {
          rhs[m][i][j][k2] = rhs[m][i][j][k2] -
            lhs[n+0][i][j][k2]*rhs[m][i][j][k];
        }
      }
    }
  }

/*--------------------------------------------------------------------
c      The last two rows in this grid block are a bit different,
c      since they do not have two more rows available for the
c      elimination of off-diagonal entries
c-------------------------------------------------------------------*/
  k = grid_points[2]-2;
  k1 = grid_points[2]-1;
  /* NOTE(review): fac2 is assigned inside this region but is not in the
     private clause (the analogous y-direction loop uses
     private(fac1, fac2)); if this region runs with more than one thread,
     fac2 is shared -- potential data race, confirm. */
#pragma omp parallel for private(fac1)
  for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for private(fac1)
    for (j = 1; j <= grid_points[1]-2; j++) {
      fac1 = 1./lhs[n+2][i][j][k];
      lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
      lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
      for (m = 0; m < 3; m++) {
        rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
      }
      lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -
        lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];
      lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -
        lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];
      for (m = 0; m < 3; m++) {
        rhs[m][i][j][k1] = rhs[m][i][j][k1] -
          lhs[n+1][i][j][k1]*rhs[m][i][j][k];
      }
/*--------------------------------------------------------------------
c               scale the last row immediately
c-------------------------------------------------------------------*/
      fac2 = 1./lhs[n+2][i][j][k1];
      for (m = 0; m < 3; m++) {
        rhs[m][i][j][k1] = fac2*rhs[m][i][j][k1];
      }
    }
  }

/*--------------------------------------------------------------------
c      do the u+c and the u-c factors
c-------------------------------------------------------------------*/
#pragma omp parallel for private(n, k1, k2, fac1)
  for (m = 3; m < 5; m++) {
    /* Offset of this component's own factor within lhs. */
    n = (m-3+1)*5;
#pragma omp parallel for private(k1, k2, fac1)
    for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for private(k1, k2, fac1)
      for (j = 1; j <= grid_points[1]-2; j++) {
        for (k = 0; k <= grid_points[2]-3; k++) {
          k1 = k + 1;
          k2 = k + 2;
          fac1 = 1./lhs[n+2][i][j][k];
          lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
          lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
          rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
          lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -
            lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];
          lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -
            lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];
          rhs[m][i][j][k1] = rhs[m][i][j][k1] -
            lhs[n+1][i][j][k1]*rhs[m][i][j][k];
          lhs[n+1][i][j][k2] = lhs[n+1][i][j][k2] -
            lhs[n+0][i][j][k2]*lhs[n+3][i][j][k];
          lhs[n+2][i][j][k2] = lhs[n+2][i][j][k2] -
            lhs[n+0][i][j][k2]*lhs[n+4][i][j][k];
          rhs[m][i][j][k2] = rhs[m][i][j][k2] -
            lhs[n+0][i][j][k2]*rhs[m][i][j][k];
        }
      }
    }

/*--------------------------------------------------------------------
c         And again the last two rows separately
c-------------------------------------------------------------------*/
    k = grid_points[2]-2;
    k1 = grid_points[2]-1;
#pragma omp parallel for private(fac1, fac2)
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (j = 1; j <= grid_points[1]-2; j++) {
        fac1 = 1./lhs[n+2][i][j][k];
        lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
        lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
        rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
        lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -
          lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];
        lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -
          lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];
        rhs[m][i][j][k1] = rhs[m][i][j][k1] -
          lhs[n+1][i][j][k1]*rhs[m][i][j][k];
/*--------------------------------------------------------------------
c               Scale the last row immediately (some of this is overkill
c               if this is the last cell)
c-------------------------------------------------------------------*/
        fac2 = 1./lhs[n+2][i][j][k1];
        rhs[m][i][j][k1] = fac2*rhs[m][i][j][k1];
      }
    }
  }

/*--------------------------------------------------------------------
c                         BACKSUBSTITUTION
c-------------------------------------------------------------------*/

  /* The last plane (k1) was fully scaled during elimination; use it to
     resolve the second-to-last plane (k) for the first three factors... */
  k = grid_points[2]-2;
  k1 = grid_points[2]-1;
  n = 0;
  for (m = 0; m < 3; m++) {
#pragma omp parallel for
    for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for
      for (j = 1; j <= grid_points[1]-2; j++) {
        rhs[m][i][j][k] = rhs[m][i][j][k] -
          lhs[n+3][i][j][k]*rhs[m][i][j][k1];
      }
    }
  }

  /* ...and for the u+c / u-c factors. */
#pragma omp parallel for private(n)
  for (m = 3; m < 5; m++) {
    n = (m-3+1)*5;
#pragma omp parallel for
    for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for
      for (j = 1; j <= grid_points[1]-2; j++) {
        rhs[m][i][j][k] = rhs[m][i][j][k] -
          lhs[n+3][i][j][k]*rhs[m][i][j][k1];
      }
    }
  }

/*--------------------------------------------------------------------
c      Whether or not this is the last processor, we always have
c      to complete the back-substitution
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c      The first three factors
c-------------------------------------------------------------------*/
  n = 0;
#pragma omp parallel for private(k1, k2)
  for (m = 0; m < 3; m++) {
#pragma omp parallel for private(k1, k2)
    for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for private(k1, k2)
      for (j = 1; j <= grid_points[1]-2; j++) {
        /* Sweep back toward k = 0 using the two planes above. */
        for (k = grid_points[2]-3; k >= 0; k--) {
          k1 = k + 1;
          k2 = k + 2;
          rhs[m][i][j][k] = rhs[m][i][j][k] -
            lhs[n+3][i][j][k]*rhs[m][i][j][k1] -
            lhs[n+4][i][j][k]*rhs[m][i][j][k2];
        }
      }
    }
  }

/*--------------------------------------------------------------------
c      And the remaining two
c-------------------------------------------------------------------*/
#pragma omp parallel for private(n, k1, k2)
  for (m = 3; m < 5; m++) {
    n = (m-3+1)*5;
#pragma omp parallel for private(k1, k2)
    for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for private(k1, k2)
      for (j = 1; j <= grid_points[1]-2; j++) {
        for (k = grid_points[2]-3; k >= 0; k--) {
          k1 = k + 1;
          k2 = k + 2;
          rhs[m][i][j][k] = rhs[m][i][j][k] -
            lhs[n+3][i][j][k]*rhs[m][i][j][k1] -
            lhs[n+4][i][j][k]*rhs[m][i][j][k2];
        }
      }
    }
  }
  }

  /* Follow-up transform applied after the z-sweep (defined elsewhere). */
  tzetar();
}
OMPIRBuilder.h
//===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the OpenMPIRBuilder class and helpers used as a convenient // way to create LLVM instructions for OpenMP directives. // //===----------------------------------------------------------------------===// #ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H #define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/IR/DebugLoc.h" #include "llvm/IR/IRBuilder.h" #include "llvm/Support/Allocator.h" #include <forward_list> namespace llvm { class CanonicalLoopInfo; /// An interface to create LLVM-IR for OpenMP directives. /// /// Each OpenMP directive has a corresponding public generator method. class OpenMPIRBuilder { public: /// Create a new OpenMPIRBuilder operating on the given module \p M. This will /// not have an effect on \p M (see initialize). OpenMPIRBuilder(Module &M) : M(M), Builder(M.getContext()) {} ~OpenMPIRBuilder(); /// Initialize the internal state, this will put structures types and /// potentially other helpers into the underlying module. Must be called /// before any other method and only once! void initialize(); /// Finalize the underlying module, e.g., by outlining regions. /// \param Fn The function to be finalized. If not used, /// all functions are finalized. /// \param AllowExtractorSinking Flag to include sinking instructions, /// emitted by CodeExtractor, in the /// outlined region. Default is false. void finalize(Function *Fn = nullptr, bool AllowExtractorSinking = false); /// Add attributes known for \p FnID to \p Fn. 
void addAttributes(omp::RuntimeFunction FnID, Function &Fn); /// Type used throughout for insertion points. using InsertPointTy = IRBuilder<>::InsertPoint; /// Callback type for variable finalization (think destructors). /// /// \param CodeGenIP is the insertion point at which the finalization code /// should be placed. /// /// A finalize callback knows about all objects that need finalization, e.g. /// destruction, when the scope of the currently generated construct is left /// at the time, and location, the callback is invoked. using FinalizeCallbackTy = std::function<void(InsertPointTy CodeGenIP)>; struct FinalizationInfo { /// The finalization callback provided by the last in-flight invocation of /// createXXXX for the directive of kind DK. FinalizeCallbackTy FiniCB; /// The directive kind of the innermost directive that has an associated /// region which might require finalization when it is left. omp::Directive DK; /// Flag to indicate if the directive is cancellable. bool IsCancellable; }; /// Push a finalization callback on the finalization stack. /// /// NOTE: Temporary solution until Clang CG is gone. void pushFinalizationCB(const FinalizationInfo &FI) { FinalizationStack.push_back(FI); } /// Pop the last finalization callback from the finalization stack. /// /// NOTE: Temporary solution until Clang CG is gone. void popFinalizationCB() { FinalizationStack.pop_back(); } /// Callback type for body (=inner region) code generation /// /// The callback takes code locations as arguments, each describing a /// location at which code might need to be generated or a location that is /// the target of control transfer. /// /// \param AllocaIP is the insertion point at which new alloca instructions /// should be placed. /// \param CodeGenIP is the insertion point at which the body code should be /// placed. /// \param ContinuationBB is the basic block target to leave the body. /// /// Note that all blocks pointed to by the arguments have terminators. 
using BodyGenCallbackTy = function_ref<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, BasicBlock &ContinuationBB)>; // This is created primarily for sections construct as llvm::function_ref // (BodyGenCallbackTy) is not storable (as described in the comments of // function_ref class - function_ref contains non-ownable reference // to the callable. using StorableBodyGenCallbackTy = std::function<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, BasicBlock &ContinuationBB)>; /// Callback type for loop body code generation. /// /// \param CodeGenIP is the insertion point where the loop's body code must be /// placed. This will be a dedicated BasicBlock with a /// conditional branch from the loop condition check and /// terminated with an unconditional branch to the loop /// latch. /// \param IndVar is the induction variable usable at the insertion point. using LoopBodyGenCallbackTy = function_ref<void(InsertPointTy CodeGenIP, Value *IndVar)>; /// Callback type for variable privatization (think copy & default /// constructor). /// /// \param AllocaIP is the insertion point at which new alloca instructions /// should be placed. /// \param CodeGenIP is the insertion point at which the privatization code /// should be placed. /// \param Original The value being copied/created, should not be used in the /// generated IR. /// \param Inner The equivalent of \p Original that should be used in the /// generated IR; this is equal to \p Original if the value is /// a pointer and can thus be passed directly, otherwise it is /// an equivalent but different value. /// \param ReplVal The replacement value, thus a copy or new created version /// of \p Inner. /// /// \returns The new insertion point where code generation continues and /// \p ReplVal the replacement value. 
using PrivatizeCallbackTy = function_ref<InsertPointTy( InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original, Value &Inner, Value *&ReplVal)>; /// Description of an LLVM-IR insertion point (IP) and a debug/source location /// (filename, line, column, ...). struct LocationDescription { template <typename T, typename U> LocationDescription(const IRBuilder<T, U> &IRB) : IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {} LocationDescription(const InsertPointTy &IP) : IP(IP) {} LocationDescription(const InsertPointTy &IP, const DebugLoc &DL) : IP(IP), DL(DL) {} InsertPointTy IP; DebugLoc DL; }; /// Emitter methods for OpenMP directives. /// ///{ /// Generator for '#omp barrier' /// /// \param Loc The location where the barrier directive was encountered. /// \param DK The kind of directive that caused the barrier. /// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier. /// \param CheckCancelFlag Flag to indicate a cancel barrier return value /// should be checked and acted upon. /// /// \returns The insertion point after the barrier. InsertPointTy createBarrier(const LocationDescription &Loc, omp::Directive DK, bool ForceSimpleCall = false, bool CheckCancelFlag = true); /// Generator for '#omp cancel' /// /// \param Loc The location where the directive was encountered. /// \param IfCondition The evaluated 'if' clause expression, if any. /// \param CanceledDirective The kind of directive that is canceled. /// /// \returns The insertion point after the barrier. InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition, omp::Directive CanceledDirective); /// Generator for '#omp parallel' /// /// \param Loc The insert and source location description. /// \param AllocaIP The insertion points to be used for alloca instructions. /// \param BodyGenCB Callback that will generate the region code. /// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies. /// \param IfCondition The evaluated 'if' clause expression, if any. /// \param NumThreads The evaluated 'num_threads' clause expression, if any. /// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind). /// \param IsCancellable Flag to indicate a cancellable parallel region. /// /// \returns The insertion position *after* the parallel. IRBuilder<>::InsertPoint createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP, BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB, FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads, omp::ProcBindKind ProcBind, bool IsCancellable); /// Generator for the control flow structure of an OpenMP canonical loop. /// /// This generator operates on the logical iteration space of the loop, i.e. /// the caller only has to provide a loop trip count of the loop as defined by /// base language semantics. The trip count is interpreted as an unsigned /// integer. The induction variable passed to \p BodyGenCB will be of the same /// type and run from 0 to \p TripCount - 1. It is up to the callback to /// convert the logical iteration variable to the loop counter variable in the /// loop body. /// /// \param Loc The insert and source location description. The insert /// location can be between two instructions or the end of a /// degenerate block (e.g. a BB under construction). /// \param BodyGenCB Callback that will generate the loop body code. /// \param TripCount Number of iterations the loop body is executed. /// \param Name Base name used to derive BB and instruction names. /// /// \returns An object representing the created control flow structure which /// can be used for loop-associated directives. CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, Value *TripCount, const Twine &Name = "loop"); /// Generator for the control flow structure of an OpenMP canonical loop. 
/// /// Instead of a logical iteration space, this allows specifying user-defined /// loop counter values using increment, upper- and lower bounds. To /// disambiguate the terminology when counting downwards, instead of lower /// bounds we use \p Start for the loop counter value in the first body /// iteration. /// /// Consider the following limitations: /// /// * A loop counter space over all integer values of its bit-width cannot be /// represented. E.g using uint8_t, its loop trip count of 256 cannot be /// stored into an 8 bit integer): /// /// DO I = 0, 255, 1 /// /// * Unsigned wrapping is only supported when wrapping only "once"; E.g. /// effectively counting downwards: /// /// for (uint8_t i = 100u; i > 0; i += 127u) /// /// /// TODO: May need to add additional parameters to represent: /// /// * Allow representing downcounting with unsigned integers. /// /// * Sign of the step and the comparison operator might disagree: /// /// for (int i = 0; i < 42; i -= 1u) /// // /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the loop body code. /// \param Start Value of the loop counter for the first iterations. /// \param Stop Loop counter values past this will stop the loop. /// \param Step Loop counter increment after each iteration; negative /// means counting down. /// \param IsSigned Whether Start, Stop and Step are signed integers. /// \param InclusiveStop Whether \p Stop itself is a valid value for the loop /// counter. /// \param ComputeIP Insertion point for instructions computing the trip /// count. Can be used to ensure the trip count is available /// at the outermost loop of a loop nest. If not set, /// defaults to the preheader of the generated loop. /// \param Name Base name used to derive BB and instruction names. /// /// \returns An object representing the created control flow structure which /// can be used for loop-associated directives. 
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop, InsertPointTy ComputeIP = {}, const Twine &Name = "loop"); /// Collapse a loop nest into a single loop. /// /// Merges loops of a loop nest into a single CanonicalLoopNest representation /// that has the same number of innermost loop iterations as the origin loop /// nest. The induction variables of the input loops are derived from the /// collapsed loop's induction variable. This is intended to be used to /// implement OpenMP's collapse clause. Before applying a directive, /// collapseLoops normalizes a loop nest to contain only a single loop and the /// directive's implementation does not need to handle multiple loops itself. /// This does not remove the need to handle all loop nest handling by /// directives, such as the ordered(<n>) clause or the simd schedule-clause /// modifier of the worksharing-loop directive. /// /// Example: /// \code /// for (int i = 0; i < 7; ++i) // Canonical loop "i" /// for (int j = 0; j < 9; ++j) // Canonical loop "j" /// body(i, j); /// \endcode /// /// After collapsing with Loops={i,j}, the loop is changed to /// \code /// for (int ij = 0; ij < 63; ++ij) { /// int i = ij / 9; /// int j = ij % 9; /// body(i, j); /// } /// \endcode /// /// In the current implementation, the following limitations apply: /// /// * All input loops have an induction variable of the same type. /// /// * The collapsed loop will have the same trip count integer type as the /// input loops. Therefore it is possible that the collapsed loop cannot /// represent all iterations of the input loops. For instance, assuming a /// 32 bit integer type, and two input loops both iterating 2^16 times, the /// theoretical trip count of the collapsed loop would be 2^32 iteration, /// which cannot be represented in an 32-bit integer. Behavior is undefined /// in this case. 
/// /// * The trip counts of every input loop must be available at \p ComputeIP. /// Non-rectangular loops are not yet supported. /// /// * At each nest level, code between a surrounding loop and its nested loop /// is hoisted into the loop body, and such code will be executed more /// often than before collapsing (or not at all if any inner loop iteration /// has a trip count of 0). This is permitted by the OpenMP specification. /// /// \param DL Debug location for instructions added for collapsing, /// such as instructions to compute/derive the input loop's /// induction variables. /// \param Loops Loops in the loop nest to collapse. Loops are specified /// from outermost-to-innermost and every control flow of a /// loop's body must pass through its directly nested loop. /// \param ComputeIP Where additional instruction that compute the collapsed /// trip count. If not set, defaults to before the generated /// loop. /// /// \returns The CanonicalLoopInfo object representing the collapsed loop. CanonicalLoopInfo *collapseLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops, InsertPointTy ComputeIP); /// Modifies the canonical loop to be a statically-scheduled workshare loop. /// /// This takes a \p LoopInfo representing a canonical loop, such as the one /// created by \p createCanonicalLoop and emits additional instructions to /// turn it into a workshare loop. In particular, it calls to an OpenMP /// runtime function in the preheader to obtain the loop bounds to be used in /// the current thread, updates the relevant instructions in the canonical /// loop and calls to an OpenMP runtime finalization function after the loop. /// /// TODO: Workshare loops with static scheduling may contain up to two loops /// that fulfill the requirements of an OpenMP canonical loop. One for /// iterating over all iterations of a chunk and another one for iterating /// over all chunks that are executed on the same thread. 
Returning /// CanonicalLoopInfo objects representing them may eventually be useful for /// the apply clause planned in OpenMP 6.0, but currently whether these are /// canonical loops is irrelevant. /// /// \param DL Debug location for instructions added for the /// workshare-loop construct itself. /// \param CLI A descriptor of the canonical loop to workshare. /// \param AllocaIP An insertion point for Alloca instructions usable in the /// preheader of the loop. /// \param NeedsBarrier Indicates whether a barrier must be inserted after /// the loop. /// \param Chunk The size of loop chunk considered as a unit when /// scheduling. If \p nullptr, defaults to 1. /// /// \returns Point where to insert code after the workshare construct. InsertPointTy applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, bool NeedsBarrier, Value *Chunk = nullptr); /// Modifies the canonical loop to be a dynamically-scheduled workshare loop. /// /// This takes a \p LoopInfo representing a canonical loop, such as the one /// created by \p createCanonicalLoop and emits additional instructions to /// turn it into a workshare loop. In particular, it calls to an OpenMP /// runtime function in the preheader to obtain, and then in each iteration /// to update the loop counter. /// /// \param DL Debug location for instructions added for the /// workshare-loop construct itself. /// \param CLI A descriptor of the canonical loop to workshare. /// \param AllocaIP An insertion point for Alloca instructions usable in the /// preheader of the loop. /// \param SchedType Type of scheduling to be passed to the init function. /// \param NeedsBarrier Indicates whether a barrier must be inserted after /// the loop. /// \param Chunk The size of loop chunk considered as a unit when /// scheduling. If \p nullptr, defaults to 1. /// /// \returns Point where to insert code after the workshare construct.
InsertPointTy applyDynamicWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, omp::OMPScheduleType SchedType, bool NeedsBarrier, Value *Chunk = nullptr); /// Modifies the canonical loop to be a workshare loop. /// /// This takes a \p LoopInfo representing a canonical loop, such as the one /// created by \p createCanonicalLoop and emits additional instructions to /// turn it into a workshare loop. In particular, it calls to an OpenMP /// runtime function in the preheader to obtain the loop bounds to be used in /// the current thread, updates the relevant instructions in the canonical /// loop and calls to an OpenMP runtime finalization function after the loop. /// /// \param DL Debug location for instructions added for the /// workshare-loop construct itself. /// \param CLI A descriptor of the canonical loop to workshare. /// \param AllocaIP An insertion point for Alloca instructions usable in the /// preheader of the loop. /// \param NeedsBarrier Indicates whether a barrier must be inserted after /// the loop. /// /// \returns Point where to insert code after the workshare construct. InsertPointTy applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, bool NeedsBarrier); /// Tile a loop nest. /// /// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in /// \p Loops must be perfectly nested, from outermost to innermost loop /// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value /// of every loop and every tile size must be usable in the outermost /// loop's preheader. This implies that the loop nest is rectangular.
/// /// Example: /// \code /// for (int i = 0; i < 15; ++i) // Canonical loop "i" /// for (int j = 0; j < 14; ++j) // Canonical loop "j" /// body(i, j); /// \endcode /// /// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to /// \code /// for (int i1 = 0; i1 < 3; ++i1) /// for (int j1 = 0; j1 < 2; ++j1) /// for (int i2 = 0; i2 < 5; ++i2) /// for (int j2 = 0; j2 < 7; ++j2) /// body(i1*5+i2, j1*7+j2); /// \endcode /// /// The returned vector contains the loops {i1,j1,i2,j2}. The loops i1 and j1 are /// referred to as the floor, and the loops i2 and j2 are the tiles. Tiling also /// handles non-constant trip counts, non-constant tile sizes and trip counts /// that are not multiples of the tile size. In the latter case the tile loop /// of the last floor-loop iteration will have fewer iterations than specified /// as its tile size. /// /// /// @param DL Debug location for instructions added by tiling, for /// instance the floor- and tile trip count computation. /// @param Loops Loops to tile. The CanonicalLoopInfo objects are /// invalidated by this method, i.e. should not be used after /// tiling. /// @param TileSizes For each loop in \p Loops, the tile size for that /// dimension. /// /// \returns A list of generated loops. Contains twice as many loops as the /// input loop nest; the first half are the floor loops and the /// second half are the tile loops. std::vector<CanonicalLoopInfo *> tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops, ArrayRef<Value *> TileSizes); /// Fully unroll a loop. /// /// Instead of unrolling the loop immediately (and duplicating its body /// instructions), it is deferred to LLVM's LoopUnrollPass by adding loop /// metadata. /// /// \param DL Debug location for instructions added by unrolling. /// \param Loop The loop to unroll. The loop will be invalidated. void unrollLoopFull(DebugLoc DL, CanonicalLoopInfo *Loop); /// Fully or partially unroll a loop.
How the loop is unrolled is determined /// using LLVM's LoopUnrollPass. /// /// \param DL Debug location for instructions added by unrolling. /// \param Loop The loop to unroll. The loop will be invalidated. void unrollLoopHeuristic(DebugLoc DL, CanonicalLoopInfo *Loop); /// Partially unroll a loop. /// /// The CanonicalLoopInfo of the unrolled loop for use with chained /// loop-associated directive can be requested using \p UnrolledCLI. Not /// needing the CanonicalLoopInfo allows more efficient code generation by /// deferring the actual unrolling to the LoopUnrollPass using loop metadata. /// A loop-associated directive applied to the unrolled loop needs to know the /// new trip count which means that if using a heuristically determined unroll /// factor (\p Factor == 0), that factor must be computed immediately. We are /// using the same logic as the LoopUnrollPass to derived the unroll factor, /// but which assumes that some canonicalization has taken place (e.g. /// Mem2Reg, LICM, GVN, Inlining, etc.). That is, the heuristic will perform /// better when the unrolled loop's CanonicalLoopInfo is not needed. /// /// \param DL Debug location for instructions added by unrolling. /// \param Loop The loop to unroll. The loop will be invalidated. /// \param Factor The factor to unroll the loop by. A factor of 0 /// indicates that a heuristic should be used to determine /// the unroll-factor. /// \param UnrolledCLI If non-null, receives the CanonicalLoopInfo of the /// partially unrolled loop. Otherwise, uses loop metadata /// to defer unrolling to the LoopUnrollPass. void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor, CanonicalLoopInfo **UnrolledCLI); /// Generator for '#omp flush' /// /// \param Loc The location where the flush directive was encountered void createFlush(const LocationDescription &Loc); /// Generator for '#omp taskwait' /// /// \param Loc The location where the taskwait directive was encountered. 
  void createTaskwait(const LocationDescription &Loc);

  /// Generator for '#omp taskyield'
  ///
  /// \param Loc The location where the taskyield directive was encountered.
  void createTaskyield(const LocationDescription &Loc);

  /// Functions used to generate reductions. Such functions take two Values
  /// representing LHS and RHS of the reduction, respectively, and a reference
  /// to the value that is updated to refer to the reduction result.
  using ReductionGenTy =
      function_ref<InsertPointTy(InsertPointTy, Value *, Value *, Value *&)>;

  /// Functions used to generate atomic reductions. Such functions take two
  /// Values representing pointers to LHS and RHS of the reduction, as well as
  /// the element type of these pointers. They are expected to atomically
  /// update the LHS to the reduced value.
  using AtomicReductionGenTy =
      function_ref<InsertPointTy(InsertPointTy, Type *, Value *, Value *)>;

  /// Information about an OpenMP reduction.
  struct ReductionInfo {
    ReductionInfo(Type *ElementType, Value *Variable, Value *PrivateVariable,
                  ReductionGenTy ReductionGen,
                  AtomicReductionGenTy AtomicReductionGen)
        : ElementType(ElementType), Variable(Variable),
          PrivateVariable(PrivateVariable), ReductionGen(ReductionGen),
          AtomicReductionGen(AtomicReductionGen) {
      // The reduction variable must point at exactly the declared element
      // type; anything else indicates a caller bug.
      assert(cast<PointerType>(Variable->getType())
                 ->isOpaqueOrPointeeTypeMatches(ElementType) &&
             "Invalid elem type");
    }

    /// Reduction element type, must match pointee type of variable.
    Type *ElementType;

    /// Reduction variable of pointer type.
    Value *Variable;

    /// Thread-private partial reduction variable.
    Value *PrivateVariable;

    /// Callback for generating the reduction body. The IR produced by this
    /// will be used to combine two values in a thread-safe context, e.g.,
    /// under lock or within the same thread, and therefore need not be
    /// atomic.
    ReductionGenTy ReductionGen;

    /// Callback for generating the atomic reduction body, may be null. The IR
    /// produced by this will be used to atomically combine two values during
    /// reduction. If null, the implementation will use the non-atomic version
    /// along with the appropriate synchronization mechanisms.
    AtomicReductionGenTy AtomicReductionGen;
  };

  // TODO: provide atomic and non-atomic reduction generators for reduction
  // operators defined by the OpenMP specification.

  /// Generator for '#omp reduction'.
  ///
  /// Emits the IR instructing the runtime to perform the specific kind of
  /// reductions. Expects reduction variables to have been privatized and
  /// initialized to reduction-neutral values separately. Emits the calls to
  /// runtime functions as well as the reduction function and the basic blocks
  /// performing the reduction atomically and non-atomically.
  ///
  /// The code emitted for the following:
  ///
  /// \code
  ///   type var_1;
  ///   type var_2;
  ///   #pragma omp <directive> reduction(reduction-op:var_1,var_2)
  ///   /* body */;
  /// \endcode
  ///
  /// corresponds to the following sketch.
  ///
  /// \code
  /// void _outlined_par() {
  ///   // N is the number of different reductions.
  ///   void *red_array[] = {privatized_var_1, privatized_var_2, ...};
  ///   switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array,
  ///                        _omp_reduction_func,
  ///                        _gomp_critical_user.reduction.var)) {
  ///   case 1: {
  ///     var_1 = var_1 <reduction-op> privatized_var_1;
  ///     var_2 = var_2 <reduction-op> privatized_var_2;
  ///     // ...
  ///     __kmpc_end_reduce(...);
  ///     break;
  ///   }
  ///   case 2: {
  ///     _Atomic<ReductionOp>(var_1, privatized_var_1);
  ///     _Atomic<ReductionOp>(var_2, privatized_var_2);
  ///     // ...
  ///     break;
  ///   }
  ///   default: break;
  ///   }
  /// }
  ///
  /// void _omp_reduction_func(void **lhs, void **rhs) {
  ///   *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0];
  ///   *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1];
  ///   // ...
  /// }
  /// \endcode
  ///
  /// \param Loc            The location where the reduction was
  ///                       encountered. Must be within the associated
  ///                       directive and after the last local access to the
  ///                       reduction variables.
/// \param AllocaIP An insertion point suitable for allocas usable /// in reductions. /// \param ReductionInfos A list of info on each reduction variable. /// \param IsNoWait A flag set if the reduction is marked as nowait. InsertPointTy createReductions(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef<ReductionInfo> ReductionInfos, bool IsNoWait = false); ///} /// Return the insertion point used by the underlying IRBuilder. InsertPointTy getInsertionPoint() { return Builder.saveIP(); } /// Update the internal location to \p Loc. bool updateToLocation(const LocationDescription &Loc) { Builder.restoreIP(Loc.IP); Builder.SetCurrentDebugLocation(Loc.DL); return Loc.IP.getBlock() != nullptr; } /// Return the function declaration for the runtime function with \p FnID. FunctionCallee getOrCreateRuntimeFunction(Module &M, omp::RuntimeFunction FnID); Function *getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID); /// Return the (LLVM-IR) string describing the source location \p LocStr. Constant *getOrCreateSrcLocStr(StringRef LocStr); /// Return the (LLVM-IR) string describing the default source location. Constant *getOrCreateDefaultSrcLocStr(); /// Return the (LLVM-IR) string describing the source location identified by /// the arguments. Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName, unsigned Line, unsigned Column); /// Return the (LLVM-IR) string describing the DebugLoc \p DL. Use \p F as /// fallback if \p DL does not specify the function name. Constant *getOrCreateSrcLocStr(DebugLoc DL, Function *F = nullptr); /// Return the (LLVM-IR) string describing the source location \p Loc. Constant *getOrCreateSrcLocStr(const LocationDescription &Loc); /// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags. 
/// TODO: Create a enum class for the Reserve2Flags Value *getOrCreateIdent(Constant *SrcLocStr, omp::IdentFlag Flags = omp::IdentFlag(0), unsigned Reserve2Flags = 0); /// Create a global flag \p Namein the module with initial value \p Value. GlobalValue *createGlobalFlag(unsigned Value, StringRef Name); /// Generate control flow and cleanup for cancellation. /// /// \param CancelFlag Flag indicating if the cancellation is performed. /// \param CanceledDirective The kind of directive that is cancled. /// \param ExitCB Extra code to be generated in the exit block. void emitCancelationCheckImpl(Value *CancelFlag, omp::Directive CanceledDirective, FinalizeCallbackTy ExitCB = {}); /// Generate a barrier runtime call. /// /// \param Loc The location at which the request originated and is fulfilled. /// \param DK The directive which caused the barrier /// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier. /// \param CheckCancelFlag Flag to indicate a cancel barrier return value /// should be checked and acted upon. /// /// \returns The insertion point after the barrier. InsertPointTy emitBarrierImpl(const LocationDescription &Loc, omp::Directive DK, bool ForceSimpleCall, bool CheckCancelFlag); /// Generate a flush runtime call. /// /// \param Loc The location at which the request originated and is fulfilled. void emitFlush(const LocationDescription &Loc); /// The finalization stack made up of finalize callbacks currently in-flight, /// wrapped into FinalizationInfo objects that reference also the finalization /// target block and the kind of cancellable directive. SmallVector<FinalizationInfo, 8> FinalizationStack; /// Return true if the last entry in the finalization stack is of kind \p DK /// and cancellable. bool isLastFinalizationInfoCancellable(omp::Directive DK) { return !FinalizationStack.empty() && FinalizationStack.back().IsCancellable && FinalizationStack.back().DK == DK; } /// Generate a taskwait runtime call. 
/// /// \param Loc The location at which the request originated and is fulfilled. void emitTaskwaitImpl(const LocationDescription &Loc); /// Generate a taskyield runtime call. /// /// \param Loc The location at which the request originated and is fulfilled. void emitTaskyieldImpl(const LocationDescription &Loc); /// Return the current thread ID. /// /// \param Ident The ident (ident_t*) describing the query origin. Value *getOrCreateThreadID(Value *Ident); /// The underlying LLVM-IR module Module &M; /// The LLVM-IR Builder used to create IR. IRBuilder<> Builder; /// Map to remember source location strings StringMap<Constant *> SrcLocStrMap; /// Map to remember existing ident_t*. DenseMap<std::pair<Constant *, uint64_t>, Value *> IdentMap; /// Helper that contains information about regions we need to outline /// during finalization. struct OutlineInfo { using PostOutlineCBTy = std::function<void(Function &)>; PostOutlineCBTy PostOutlineCB; BasicBlock *EntryBB, *ExitBB; /// Collect all blocks in between EntryBB and ExitBB in both the given /// vector and set. void collectBlocks(SmallPtrSetImpl<BasicBlock *> &BlockSet, SmallVectorImpl<BasicBlock *> &BlockVector); /// Return the function that contains the region to be outlined. Function *getFunction() const { return EntryBB->getParent(); } }; /// Collection of regions that need to be outlined during finalization. SmallVector<OutlineInfo, 16> OutlineInfos; /// Collection of owned canonical loop objects that eventually need to be /// free'd. std::forward_list<CanonicalLoopInfo> LoopInfos; /// Add a new region that will be outlined later. void addOutlineInfo(OutlineInfo &&OI) { OutlineInfos.emplace_back(OI); } /// An ordered map of auto-generated variables to their unique names. /// It stores variables with the following names: 1) ".gomp_critical_user_" + /// <critical_section_name> + ".var" for "omp critical" directives; 2) /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate /// variables. 
StringMap<AssertingVH<Constant>, BumpPtrAllocator> InternalVars; /// Create the global variable holding the offload mappings information. GlobalVariable *createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings, std::string VarName); /// Create the global variable holding the offload names information. GlobalVariable * createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names, std::string VarName); struct MapperAllocas { AllocaInst *ArgsBase = nullptr; AllocaInst *Args = nullptr; AllocaInst *ArgSizes = nullptr; }; /// Create the allocas instruction used in call to mapper functions. void createMapperAllocas(const LocationDescription &Loc, InsertPointTy AllocaIP, unsigned NumOperands, struct MapperAllocas &MapperAllocas); /// Create the call for the target mapper function. /// \param Loc The source location description. /// \param MapperFunc Function to be called. /// \param SrcLocInfo Source location information global. /// \param MaptypesArg The argument types. /// \param MapnamesArg The argument names. /// \param MapperAllocas The AllocaInst used for the call. /// \param DeviceID Device ID for the call. /// \param NumOperands Number of operands in the call. void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc, Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg, struct MapperAllocas &MapperAllocas, int64_t DeviceID, unsigned NumOperands); public: /// Generator for __kmpc_copyprivate /// /// \param Loc The source location description. /// \param BufSize Number of elements in the buffer. /// \param CpyBuf List of pointers to data to be copied. /// \param CpyFn function to call for copying data. /// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise. /// /// \return The insertion position *after* the CopyPrivate call. 
InsertPointTy createCopyPrivate(const LocationDescription &Loc, llvm::Value *BufSize, llvm::Value *CpyBuf, llvm::Value *CpyFn, llvm::Value *DidIt); /// Generator for '#omp single' /// /// \param Loc The source location description. /// \param BodyGenCB Callback that will generate the region code. /// \param FiniCB Callback to finalize variable copies. /// \param DidIt Local variable used as a flag to indicate 'single' thread /// /// \returns The insertion position *after* the single call. InsertPointTy createSingle(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, llvm::Value *DidIt); /// Generator for '#omp master' /// /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the region code. /// \param FiniCB Callback to finalize variable copies. /// /// \returns The insertion position *after* the master. InsertPointTy createMaster(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB); /// Generator for '#omp masked' /// /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the region code. /// \param FiniCB Callback to finialize variable copies. /// /// \returns The insertion position *after* the masked. InsertPointTy createMasked(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, Value *Filter); /// Generator for '#omp critical' /// /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the region body code. /// \param FiniCB Callback to finalize variable copies. /// \param CriticalName name of the lock used by the critical directive /// \param HintInst Hint Instruction for hint clause associated with critical /// /// \returns The insertion position *after* the critical. 
InsertPointTy createCritical(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst); /// Generator for '#omp ordered depend (source | sink)' /// /// \param Loc The insert and source location description. /// \param AllocaIP The insertion point to be used for alloca instructions. /// \param NumLoops The number of loops in depend clause. /// \param StoreValues The value will be stored in vector address. /// \param Name The name of alloca instruction. /// \param IsDependSource If true, depend source; otherwise, depend sink. /// /// \return The insertion position *after* the ordered. InsertPointTy createOrderedDepend(const LocationDescription &Loc, InsertPointTy AllocaIP, unsigned NumLoops, ArrayRef<llvm::Value *> StoreValues, const Twine &Name, bool IsDependSource); /// Generator for '#omp ordered [threads | simd]' /// /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the region code. /// \param FiniCB Callback to finalize variable copies. /// \param IsThreads If true, with threads clause or without clause; /// otherwise, with simd clause; /// /// \returns The insertion position *after* the ordered. InsertPointTy createOrderedThreadsSimd(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool IsThreads); /// Generator for '#omp sections' /// /// \param Loc The insert and source location description. /// \param AllocaIP The insertion points to be used for alloca instructions. /// \param SectionCBs Callbacks that will generate body of each section. /// \param PrivCB Callback to copy a given variable (think copy constructor). /// \param FiniCB Callback to finalize variable copies. /// \param IsCancellable Flag to indicate a cancellable parallel region. /// \param IsNowait If true, barrier - to ensure all sections are executed /// before moving forward will not be generated. 
/// \returns The insertion position *after* the sections. InsertPointTy createSections(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef<StorableBodyGenCallbackTy> SectionCBs, PrivatizeCallbackTy PrivCB, FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait); /// Generator for '#omp section' /// /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the region body code. /// \param FiniCB Callback to finalize variable copies. /// \returns The insertion position *after* the section. InsertPointTy createSection(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB); /// Generate conditional branch and relevant BasicBlocks through which private /// threads copy the 'copyin' variables from Master copy to threadprivate /// copies. /// /// \param IP insertion block for copyin conditional /// \param MasterVarPtr a pointer to the master variable /// \param PrivateVarPtr a pointer to the threadprivate variable /// \param IntPtrTy Pointer size type /// \param BranchtoEnd Create a branch between the copyin.not.master blocks // and copy.in.end block /// /// \returns The insertion point where copying operation to be emitted. InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr, llvm::IntegerType *IntPtrTy, bool BranchtoEnd = true); /// Create a runtime call for kmpc_Alloc /// /// \param Loc The insert and source location description. /// \param Size Size of allocated memory space /// \param Allocator Allocator information instruction /// \param Name Name of call Instruction for OMP_alloc /// /// \returns CallInst to the OMP_Alloc call CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size, Value *Allocator, std::string Name = ""); /// Create a runtime call for kmpc_free /// /// \param Loc The insert and source location description. 
/// \param Addr Address of memory space to be freed /// \param Allocator Allocator information instruction /// \param Name Name of call Instruction for OMP_Free /// /// \returns CallInst to the OMP_Free call CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr, Value *Allocator, std::string Name = ""); /// Create a runtime call for kmpc_threadprivate_cached /// /// \param Loc The insert and source location description. /// \param Pointer pointer to data to be cached /// \param Size size of data to be cached /// \param Name Name of call Instruction for callinst /// /// \returns CallInst to the thread private cache call. CallInst *createCachedThreadPrivate(const LocationDescription &Loc, llvm::Value *Pointer, llvm::ConstantInt *Size, const llvm::Twine &Name = Twine("")); /// The `omp target` interface /// /// For more information about the usage of this interface, /// \see openmp/libomptarget/deviceRTLs/common/include/target.h /// ///{ /// Create a runtime call for kmpc_target_init /// /// \param Loc The insert and source location description. /// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not. /// \param RequiresFullRuntime Indicate if a full device runtime is necessary. InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD, bool RequiresFullRuntime); /// Create a runtime call for kmpc_target_deinit /// /// \param Loc The insert and source location description. /// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not. /// \param RequiresFullRuntime Indicate if a full device runtime is necessary. void createTargetDeinit(const LocationDescription &Loc, bool IsSPMD, bool RequiresFullRuntime); ///} /// Declarations for LLVM-IR types (simple, array, function and structure) are /// generated below. Their names are defined and used in OpenMPKinds.def. Here /// we provide the declarations, the initializeTypes function will provide the /// values. 
/// ///{ #define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr; #define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \ ArrayType *VarName##Ty = nullptr; \ PointerType *VarName##PtrTy = nullptr; #define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \ FunctionType *VarName = nullptr; \ PointerType *VarName##Ptr = nullptr; #define OMP_STRUCT_TYPE(VarName, StrName, ...) \ StructType *VarName = nullptr; \ PointerType *VarName##Ptr = nullptr; #include "llvm/Frontend/OpenMP/OMPKinds.def" ///} private: /// Create all simple and struct types exposed by the runtime and remember /// the llvm::PointerTypes of them for easy access later. void initializeTypes(Module &M); /// Common interface for generating entry calls for OMP Directives. /// if the directive has a region/body, It will set the insertion /// point to the body /// /// \param OMPD Directive to generate entry blocks for /// \param EntryCall Call to the entry OMP Runtime Function /// \param ExitBB block where the region ends. /// \param Conditional indicate if the entry call result will be used /// to evaluate a conditional of whether a thread will execute /// body code or not. /// /// \return The insertion position in exit block InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall, BasicBlock *ExitBB, bool Conditional = false); /// Common interface to finalize the region /// /// \param OMPD Directive to generate exiting code for /// \param FinIP Insertion point for emitting Finalization code and exit call /// \param ExitCall Call to the ending OMP Runtime Function /// \param HasFinalize indicate if the directive will require finalization /// and has a finalization callback in the stack that /// should be called. 
/// /// \return The insertion position in exit block InsertPointTy emitCommonDirectiveExit(omp::Directive OMPD, InsertPointTy FinIP, Instruction *ExitCall, bool HasFinalize = true); /// Common Interface to generate OMP inlined regions /// /// \param OMPD Directive to generate inlined region for /// \param EntryCall Call to the entry OMP Runtime Function /// \param ExitCall Call to the ending OMP Runtime Function /// \param BodyGenCB Body code generation callback. /// \param FiniCB Finalization Callback. Will be called when finalizing region /// \param Conditional indicate if the entry call result will be used /// to evaluate a conditional of whether a thread will execute /// body code or not. /// \param HasFinalize indicate if the directive will require finalization /// and has a finalization callback in the stack that /// should be called. /// \param IsCancellable if HasFinalize is set to true, indicate if the /// the directive should be cancellable. /// \return The insertion point after the region InsertPointTy EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall, Instruction *ExitCall, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool Conditional = false, bool HasFinalize = true, bool IsCancellable = false); /// Get the platform-specific name separator. /// \param Parts different parts of the final name that needs separation /// \param FirstSeparator First separator used between the initial two /// parts of the name. /// \param Separator separator used between all of the rest consecutive /// parts of the name static std::string getNameWithSeparators(ArrayRef<StringRef> Parts, StringRef FirstSeparator, StringRef Separator); /// Gets (if variable with the given name already exist) or creates /// internal global variable with the specified Name. The created variable has /// linkage CommonLinkage by default and is initialized by null value. /// \param Ty Type of the global variable. If it is exist already the type /// must be the same. 
/// \param Name Name of the variable. Constant *getOrCreateOMPInternalVariable(Type *Ty, const Twine &Name, unsigned AddressSpace = 0); /// Returns corresponding lock object for the specified critical region /// name. If the lock object does not exist it is created, otherwise the /// reference to the existing copy is returned. /// \param CriticalName Name of the critical region. /// Value *getOMPCriticalRegionLock(StringRef CriticalName); /// Callback type for Atomic Expression update /// ex: /// \code{.cpp} /// unsigned x = 0; /// #pragma omp atomic update /// x = Expr(x_old); //Expr() is any legal operation /// \endcode /// /// \param XOld the value of the atomic memory address to use for update /// \param IRB reference to the IRBuilder to use /// /// \returns Value to update X to. using AtomicUpdateCallbackTy = const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>; private: enum AtomicKind { Read, Write, Update, Capture }; /// Determine whether to emit flush or not /// /// \param Loc The insert and source location description. /// \param AO The required atomic ordering /// \param AK The OpenMP atomic operation kind used. /// /// \returns wether a flush was emitted or not bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc, AtomicOrdering AO, AtomicKind AK); /// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X) /// Only Scalar data types. /// /// \param AllocIP Instruction to create AllocaInst before. /// \param X The target atomic pointer to be updated /// \param Expr The value to update X with. /// \param AO Atomic ordering of the generated atomic /// instructions. /// \param RMWOp The binary operation used for update. If /// operation is not supported by atomicRMW, /// or belong to {FADD, FSUB, BAD_BINOP}. /// Then a `cmpExch` based atomic will be generated. 
/// \param UpdateOp Code generator for complex expressions that cannot be /// expressed through atomicrmw instruction. /// \param VolatileX true if \a X volatile? /// \param IsXBinopExpr true if \a X is Left H.S. in Right H.S. part of the /// update expression, false otherwise. /// (e.g. true for X = X BinOp Expr) /// /// \returns A pair of the old value of X before the update, and the value /// used for the update. std::pair<Value *, Value *> emitAtomicUpdate(Instruction *AllocIP, Value *X, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool VolatileX, bool IsXBinopExpr); /// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2 . /// /// \Return The instruction Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2, AtomicRMWInst::BinOp RMWOp); public: /// a struct to pack relevant information while generating atomic Ops struct AtomicOpValue { Value *Var = nullptr; bool IsSigned = false; bool IsVolatile = false; }; /// Emit atomic Read for : V = X --- Only Scalar data types. /// /// \param Loc The insert and source location description. /// \param X The target pointer to be atomically read /// \param V Memory address where to store atomically read /// value /// \param AO Atomic ordering of the generated atomic /// instructions. /// /// \return Insertion point after generated atomic read IR. InsertPointTy createAtomicRead(const LocationDescription &Loc, AtomicOpValue &X, AtomicOpValue &V, AtomicOrdering AO); /// Emit atomic write for : X = Expr --- Only Scalar data types. /// /// \param Loc The insert and source location description. /// \param X The target pointer to be atomically written to /// \param Expr The value to store. /// \param AO Atomic ordering of the generated atomic /// instructions. /// /// \return Insertion point after generated atomic Write IR. 
  InsertPointTy createAtomicWrite(const LocationDescription &Loc,
                                  AtomicOpValue &X, Value *Expr,
                                  AtomicOrdering AO);

  /// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X
  /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
  /// Only Scalar data types.
  ///
  /// \param Loc The insert and source location description.
  /// \param AllocIP Instruction to create AllocaInst before.
  /// \param X The target atomic pointer to be updated
  /// \param Expr The value to update X with.
  /// \param AO Atomic ordering of the generated atomic instructions.
  /// \param RMWOp The binary operation used for update. If operation
  ///              is not supported by atomicRMW, or belong to
  ///              {FADD, FSUB, BAD_BINOP}. Then a `cmpExch` based
  ///              atomic will be generated.
  /// \param UpdateOp Code generator for complex expressions that cannot be
  ///                 expressed through atomicrmw instruction.
  /// \param IsXBinopExpr true if \a X is Left H.S. in Right H.S. part of the
  ///                     update expression, false otherwise.
  ///                     (e.g. true for X = X BinOp Expr)
  ///
  /// \return Insertion point after generated atomic update IR.
  InsertPointTy createAtomicUpdate(const LocationDescription &Loc,
                                   Instruction *AllocIP, AtomicOpValue &X,
                                   Value *Expr, AtomicOrdering AO,
                                   AtomicRMWInst::BinOp RMWOp,
                                   AtomicUpdateCallbackTy &UpdateOp,
                                   bool IsXBinopExpr);

  /// Emit atomic update for constructs: --- Only Scalar data types
  /// V = X; X = X BinOp Expr ,
  /// X = X BinOp Expr; V = X,
  /// V = X; X = Expr BinOp X,
  /// X = Expr BinOp X; V = X,
  /// V = X; X = UpdateOp(X),
  /// X = UpdateOp(X); V = X,
  ///
  /// \param Loc The insert and source location description.
  /// \param AllocIP Instruction to create AllocaInst before.
  /// \param X The target atomic pointer to be updated
  /// \param V Memory address where to store captured value
  /// \param Expr The value to update X with.
  /// \param AO Atomic ordering of the generated atomic instructions
  /// \param RMWOp The binary operation used for update. If
  ///              operation is not supported by atomicRMW, or belong to
  ///              {FADD, FSUB, BAD_BINOP}. Then a cmpExch based
  ///              atomic will be generated.
  /// \param UpdateOp Code generator for complex expressions that cannot be
  ///                 expressed through atomicrmw instruction.
  /// \param UpdateExpr true if X is an in place update of the form
  ///                   X = X BinOp Expr or X = Expr BinOp X
  /// \param IsXBinopExpr true if X is Left H.S. in Right H.S. part of the
  ///                     update expression, false otherwise.
  ///                     (e.g. true for X = X BinOp Expr)
  /// \param IsPostfixUpdate true if original value of 'x' must be stored in
  ///                        'v', not an updated one.
  ///
  /// \return Insertion point after generated atomic capture IR.
  InsertPointTy createAtomicCapture(const LocationDescription &Loc,
                                    Instruction *AllocIP, AtomicOpValue &X,
                                    AtomicOpValue &V, Value *Expr,
                                    AtomicOrdering AO,
                                    AtomicRMWInst::BinOp RMWOp,
                                    AtomicUpdateCallbackTy &UpdateOp,
                                    bool UpdateExpr, bool IsPostfixUpdate,
                                    bool IsXBinopExpr);

  /// Create the control flow structure of a canonical OpenMP loop.
  ///
  /// The emitted loop will be disconnected, i.e. no edge to the loop's
  /// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's
  /// IRBuilder location is not preserved.
  ///
  /// \param DL DebugLoc used for the instructions in the skeleton.
  /// \param TripCount Value to be used for the trip count.
  /// \param F Function in which to insert the BasicBlocks.
  /// \param PreInsertBefore Where to insert BBs that execute before the body,
  ///                        typically the body itself.
  /// \param PostInsertBefore Where to insert BBs that execute after the body.
  /// \param Name Base name used to derive BB
  ///             and instruction names.
  ///
  /// \returns The CanonicalLoopInfo that represents the emitted loop.
  CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount,
                                        Function *F,
                                        BasicBlock *PreInsertBefore,
                                        BasicBlock *PostInsertBefore,
                                        const Twine &Name = {});
};

/// Class to represent the control flow structure of an OpenMP canonical loop.
///
/// The control-flow structure is standardized for easy consumption by
/// directives associated with loops. For instance, the worksharing-loop
/// construct may change this control flow such that each loop iteration is
/// executed on only one thread. The constraints of a canonical loop in brief
/// are:
///
///  * The number of loop iterations must have been computed before entering the
///    loop.
///
///  * Has an (unsigned) logical induction variable that starts at zero and
///    increments by one.
///
///  * The loop's CFG itself has no side-effects. The OpenMP specification
///    itself allows side-effects, but the order in which they happen, including
///    how often or whether at all, is unspecified. We expect that the frontend
///    will emit those side-effect instructions somewhere (e.g. before the loop)
///    such that the CanonicalLoopInfo itself can be side-effect free.
///
/// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated
/// execution of a loop body that satisfies these constraints. It does NOT
/// represent arbitrary SESE regions that happen to contain a loop. Do not use
/// CanonicalLoopInfo for such purposes.
///
/// The control flow can be described as follows:
///
///      Preheader
///         |
///  /-> Header
///  |       |
///  |    Cond---\
///  |       |   |
///  |     Body  |
///  |    | | |  |
///  |   <...>   |
///  |    | | |  |
///  \--Latch    |
///              |
///            Exit
///              |
///            After
///
/// The loop is thought to start at PreheaderIP (at the Preheader's terminator,
/// including) and end at AfterIP (at the After's first instruction, excluding).
/// That is, instructions in the Preheader and After blocks (except the
/// Preheader's terminator) are out of CanonicalLoopInfo's control and may have
/// side-effects. Typically, the Preheader is used to compute the loop's trip
/// count. The instructions from BodyIP (at the Body block's first instruction,
/// excluding) until the Latch are also considered outside CanonicalLoopInfo's
/// control and thus can have side-effects. The body block is the single entry
/// point into the loop body, which may contain arbitrary control flow as long
/// as all control paths eventually branch to the Latch block.
///
/// TODO: Consider adding another standardized BasicBlock between Body CFG and
/// Latch to guarantee that there is only a single edge to the latch. It would
/// make loop transformations easier to not needing to consider multiple
/// predecessors of the latch (See redirectAllPredecessorsTo) and would give us
/// an equivalent to PreheaderIP, AfterIP and BodyIP for inserting code that
/// executes after each body iteration.
///
/// There must be no loop-carried dependencies through llvm::Values. This is
/// equivalent to that the Latch has no PHINode and the Header's only PHINode is
/// for the induction variable.
///
/// All code in Header, Cond, Latch and Exit (plus the terminator of the
/// Preheader) are CanonicalLoopInfo's responsibility and their build-up checked
/// by assertOK(). They are expected to not be modified unless explicitly
/// modifying the CanonicalLoopInfo through a method that applies an OpenMP
/// loop-associated construct such as applyWorkshareLoop, tileLoops, unrollLoop,
/// etc. These methods usually invalidate the CanonicalLoopInfo and re-use its
/// basic blocks. After invalidation, the CanonicalLoopInfo must not be used
/// anymore as its underlying control flow may not exist anymore.
/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
/// may also return a new CanonicalLoopInfo that can be passed to other
/// loop-associated construct implementing methods. These loop-transforming
/// methods may either create a new CanonicalLoopInfo usually using
/// createLoopSkeleton and invalidate the input CanonicalLoopInfo, or reuse and
/// modify one of the input CanonicalLoopInfo and return it as representing the
/// modified loop. What is done is an implementation detail of
/// transformation-implementing method and callers should always assume that the
/// CanonicalLoopInfo passed to it is invalidated and a new object is returned.
/// Returned CanonicalLoopInfo have the same structure and guarantees as the one
/// created by createCanonicalLoop, such that transforming methods do not have
/// to special case where the CanonicalLoopInfo originated from.
///
/// Generally, methods consuming CanonicalLoopInfo do not need an
/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
/// any InsertPoint that is outside CanonicalLoopInfo's control. Specifically,
/// any InsertPoint in the Preheader, After or Block can still be used after
/// calling such a method.
///
/// TODO: Provide mechanisms for exception handling and cancellation points.
///
/// Defined outside OpenMPIRBuilder because nested classes cannot be
/// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h.
class CanonicalLoopInfo {
  friend class OpenMPIRBuilder;

private:
  BasicBlock *Header = nullptr;
  BasicBlock *Cond = nullptr;
  BasicBlock *Latch = nullptr;
  BasicBlock *Exit = nullptr;

  /// Add the control blocks of this loop to \p BBs.
  ///
  /// This does not include any block from the body, including the one returned
  /// by getBody().
  ///
  /// FIXME: This currently includes the Preheader and After blocks even though
  /// their content is (mostly) not under CanonicalLoopInfo's control.
  /// Re-evaluate whether this makes sense.
  void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);

public:
  /// Returns whether this object currently represents the IR of a loop. If
  /// returning false, it may have been consumed by a loop transformation or not
  /// been initialized. Do not use in this case.
  bool isValid() const { return Header; }

  /// The preheader ensures that there is only a single edge entering the loop.
  /// Code that must be executed before any loop iteration can be emitted here,
  /// such as computing the loop trip count and begin lifetime markers. Code in
  /// the preheader is not considered part of the canonical loop.
  BasicBlock *getPreheader() const;

  /// The header is the entry for each iteration. In the canonical control flow,
  /// it only contains the PHINode for the induction variable.
  BasicBlock *getHeader() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Header;
  }

  /// The condition block computes whether there is another loop iteration. If
  /// yes, branches to the body; otherwise to the exit block.
  BasicBlock *getCond() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Cond;
  }

  /// The body block is the single entry for a loop iteration and not controlled
  /// by CanonicalLoopInfo. It can contain arbitrary control flow but must
  /// eventually branch to the \p Latch block.
  BasicBlock *getBody() const {
    assert(isValid() && "Requires a valid canonical loop");
    return cast<BranchInst>(Cond->getTerminator())->getSuccessor(0);
  }

  /// Reaching the latch indicates the end of the loop body code. In the
  /// canonical control flow, it only contains the increment of the induction
  /// variable.
  BasicBlock *getLatch() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Latch;
  }

  /// Reaching the exit indicates no more iterations are being executed.
  BasicBlock *getExit() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Exit;
  }

  /// The after block is intended for clean-up code such as lifetime end
  /// markers. It is separate from the exit block to ensure, analogous to the
  /// preheader, it having just a single entry edge and being free from PHI
  /// nodes should there be multiple loop exits (such as from break
  /// statements/cancellations).
  BasicBlock *getAfter() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Exit->getSingleSuccessor();
  }

  /// Returns the llvm::Value containing the number of loop iterations. It must
  /// be valid in the preheader and always interpreted as an unsigned integer of
  /// any bit-width.
  Value *getTripCount() const {
    assert(isValid() && "Requires a valid canonical loop");
    Instruction *CmpI = &Cond->front();
    assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
    return CmpI->getOperand(1);
  }

  /// Returns the instruction representing the current logical induction
  /// variable. Always unsigned, always starting at 0 with an increment of one.
  Instruction *getIndVar() const {
    assert(isValid() && "Requires a valid canonical loop");
    Instruction *IndVarPHI = &Header->front();
    assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI");
    return IndVarPHI;
  }

  /// Return the type of the induction variable (and the trip count).
  Type *getIndVarType() const {
    assert(isValid() && "Requires a valid canonical loop");
    return getIndVar()->getType();
  }

  /// Return the insertion point for user code before the loop.
  OpenMPIRBuilder::InsertPointTy getPreheaderIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    BasicBlock *Preheader = getPreheader();
    return {Preheader, std::prev(Preheader->end())};
  };

  /// Return the insertion point for user code in the body.
  OpenMPIRBuilder::InsertPointTy getBodyIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    BasicBlock *Body = getBody();
    return {Body, Body->begin()};
  };

  /// Return the insertion point for user code after the loop.
  OpenMPIRBuilder::InsertPointTy getAfterIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    BasicBlock *After = getAfter();
    return {After, After->begin()};
  };

  /// Return the function that contains the loop's blocks (the parent of the
  /// header block).
  Function *getFunction() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Header->getParent();
  }

  /// Consistency self-check.
  void assertOK() const;

  /// Invalidate this loop. That is, the underlying IR does not fulfill the
  /// requirements of an OpenMP canonical loop anymore.
  void invalidate();
};

} // end namespace llvm

#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
scalability.c
/**
 * \file
 * \brief libbomp test: times a parallel-for sweep over a large static array.
 */

/*
 * Copyright (c) 2007, 2008, 2009, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <assert.h>

#ifdef POSIX
/* Read the x86 time-stamp counter. Only defined for POSIX hosts here; when
 * POSIX is not defined (Barrelfish build) rdtsc() is presumably provided by
 * a platform header -- TODO confirm. */
static inline uint64_t rdtsc(void)
{
    uint32_t eax, edx;
    __asm volatile ("rdtsc" : "=a" (eax), "=d" (edx));
    return ((uint64_t)edx << 32) | eax;
}
#endif

#define N 10000000

/**
 * \brief Usage: prog <nthreads>.
 *
 * Initializes a[] serially (warm-up), then re-writes it with an OpenMP
 * parallel-for and reports the elapsed TSC cycles.
 */
int main(int argc, char *argv[])
{
    uint64_t begin, end;
    int i;
    static int a[N];            // static: N ints would overflow the stack

#ifndef POSIX
    bomp_custom_init(NULL);     // Barrelfish libbomp runtime setup
#endif

    assert(argc == 2);
    omp_set_num_threads(atoi(argv[1]));

    // serial initialization pass (also warms the pages/caches)
    for (i = 0; i < N; i++) {
        a[i] = 2 * i;
    }

    begin = rdtsc();
#pragma omp parallel for
    for (i = 0; i < N; i++) {
        a[i] = 2 * i;
    }
    end = rdtsc();

    // BUGFIX: end-begin is uint64_t; "%lu" is wrong where long is 32 bits.
    printf("Value of sum is %d, time taken %" PRIu64 "\n", 0, end - begin);
    return 0;
}
lastprivate-clause.c
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif

/**
 * Demonstrates the OpenMP 'lastprivate' clause: every thread works on a
 * private copy of v, and after the construct v holds the value assigned in
 * the sequentially-last iteration of the loop.
 */
int main(void)
{
    int i, n = 7;
    int a[n], v;

    for (i = 0; i < n; i++)
        a[i] = i + 1;

#pragma omp parallel for lastprivate(v)
    for (i = 0; i < n; i++) {
        v = a[i];
        printf("thread %d v=%d / ", omp_get_thread_num(), v);
    }

    printf("\nFuera de la construcción'parallel for' v=%d\n", v);
    return 0;
}

// 'lastprivate' lets a variable that is private inside the construct keep a
// value after it. For a 'for' construct, v receives the value from the thread
// that executed the last iterations; for a 'sections' construct it would be
// the value from the lexically last section.
GB_unop__exp_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__exp_fc64_fc64)
// op(A') function:  GB (_unop_tran__exp_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = cexp (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = cexp (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC64_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ;            \
    Cx [pC] = cexp (z) ;            \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EXP || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__exp_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // Ab == NULL: no bitmap, so all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = cexp (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = cexp (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__exp_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel comes from the shared template, specialized via
    // the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
cgbtrf.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgbtrf.c, normal z -> c, Fri Sep 28 17:38:04 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_gbtrf
 *
 *  Computes an LU factorization of a real m-by-n band matrix A
 *  using partial pivoting with row interchanges.
 *
 *******************************************************************************
 *
 * @param[in] m
 *          The number of rows of the matrix A. m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrix A. n >= 0.
 *
 * @param[in] kl
 *          The number of subdiagonals within the band of A. kl >= 0.
 *
 * @param[in] ku
 *          The number of superdiagonals within the band of A. ku >= 0.
 *
 * @param[in,out] pAB
 *          Details of the LU factorization of the band matrix A, as
 *          computed by plasma_cgbtrf.
 *
 * @param[in] ldab
 *          The leading dimension of the array AB.
 *
 * @param[out] ipiv
 *          The pivot indices; for 1 <= i <= min(m,n), row i of the
 *          matrix was interchanged with row ipiv(i).
 *
 * @retval PlasmaSuccess on success; a negative value for an illegal
 *         argument, or the asynchronous sequence status otherwise.
 *
 ******************************************************************************/
int plasma_cgbtrf(int m, int n, int kl, int ku,
                  plasma_complex32_t *pAB, int ldab, int *ipiv)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (kl < 0) {
        plasma_error("illegal value of kl");
        return -3;
    }
    if (ku < 0) {
        plasma_error("illegal value of ku");
        return -4;
    }
    if (ldab < imax(1, 1+kl+ku)) {
        plasma_error("illegal value of ldab");
        return -6;
    }

    // quick return (BUGFIX: comment previously present with no implementation)
    if (m == 0 || n == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_gbtrf(plasma, PlasmaComplexFloat, n, kl+ku+1);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Initialize barrier.
    plasma_barrier_init(&plasma->barrier);

    // Create tile matrix.
    plasma_desc_t AB;
    int tku = (ku+kl+nb-1)/nb; // number of tiles in upper band (not including diagonal)
    int tkl = (kl+nb-1)/nb;    // number of tiles in lower band (not including diagonal)
    int lm = (tku+tkl+1)*nb;   // since we use cgetrf on panel, we pivot back within panel.
                               // this could fill the last tile of the panel,
                               // and we need extra NB space on the bottom
    int retval;
    retval = plasma_desc_general_band_create(PlasmaComplexFloat, PlasmaGeneral,
                                             nb, nb, lm, n, 0, 0, m, n, kl, ku,
                                             &AB);
    if (retval != PlasmaSuccess) {
        // BUGFIX: error message previously named plasma_desc_general_create()
        plasma_error("plasma_desc_general_band_create() failed");
        return retval;
    }

    // Initialize sequence (BUGFIX: return value was previously ignored).
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_desc_destroy(&AB);
        return retval;
    }

    // Initialize request (BUGFIX: return value was previously ignored).
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_desc_destroy(&AB);
        return retval;
    }

    // One parallel/master region for all three stages, so the asynchronous
    // tile routines can pipeline through task dependencies (three separate
    // regions would insert a full barrier between every stage).
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_cpb2desc(pAB, ldab, AB, &sequence, &request);

        // Call the tile async function.
        plasma_omp_cgbtrf(AB, ipiv, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_cdesc2pb(AB, pAB, ldab, &sequence, &request);
    }

    // Free matrix A in tile layout.
    plasma_desc_destroy(&AB);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 *  Computes an LU factorization of a real m-by-n band matrix A
 *  using partial pivoting with row interchanges.
 *  Non-blocking tile version of plasma_cgbsv().
 *  Operates on matrices stored by tiles.
 *  All matrices are passed through descriptors.
 *  All dimensions are taken from the descriptors.
 *  Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in,out] AB
 *          Descriptor of matrix A.
 *
 * @param[out] ipiv
 *          The pivot indices; for 1 <= i <= min(m,n), row i of the
 *          matrix was interchanged with row ipiv(i).
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes). Check
 *          the sequence->status for errors.
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 ******************************************************************************/
void plasma_omp_cgbtrf(plasma_desc_t AB, int *ipiv,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Check the error-reporting handles first: they must themselves be valid
    // before they can be used to report any other failure.
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        // BUGFIX: previously called plasma_request_fail() with a NULL
        // sequence, which dereferences the NULL pointer.
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        // BUGFIX: likewise, request must be non-NULL to record a failure.
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(AB) != PlasmaSuccess) {
        plasma_error("invalid AB");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Call the parallel function.
    plasma_pcgbtrf(AB, ipiv, sequence, request);
}
SE_fg_int_mex.c
#include "mex.h"
#include "../SE_fgg.h"

// Fills params from the MATLAB options struct (defined in the SE_fgg library).
void SE_FGG_MEX_params(SE_FGG_params*, const mxArray*, int);

// Input (right-hand side) MEX arguments
#define X   prhs[0]   // particle coordinates (N rows)
#define HH  prhs[1]   // periodic grid data
#define OPT prhs[2]   // options struct, parsed by SE_FGG_MEX_params

// Output
#define PHI_OUT plhs[0]

#ifndef VERBOSE
#define VERBOSE 0
#endif

// MEX gateway: evaluates the gridded field at the N particle positions
// ("integration"/gather step of SE fast Gaussian gridding). The periodicity
// variant (1P/2P/3P) is selected at compile time via the *_PERIODIC macros.
void mexFunction( int nlhs,       mxArray *plhs[],
		  int nrhs, const mxArray *prhs[] )
{
    // number of particles = number of rows of X
    const int N = mxGetM(X);
    double* restrict x = mxGetPr(X);
    const double* H_per = mxGetPr(HH);

    SE_FGG_params params;
    SE_FGG_MEX_params(&params, OPT, N);

    // scratch arrays
    SE_FGG_work work;
    SE_FGG_allocate_workspace(&work, &params,true,false);

    // output vector
    PHI_OUT = mxCreateDoubleMatrix(N,1,mxREAL);
    double* phi = mxGetPr(PHI_OUT);

    // coordinates and charges (q is NULL: charges are not needed here)
    const SE_state st = {.x = x, .q = NULL};

    if(VERBOSE)
	mexPrintf("[SE%s FG(I)] N=%d, P=%d\n",PER_STR,N,params.P);

#ifdef _OPENMP
#pragma omp parallel default(shared)
#endif
    {
        // now do the work
        // NOTE(review): every thread enters this region, so the SE_FGG_*
        // routines are presumably internally thread-aware (splitting work by
        // thread id) -- confirm against SE_fgg.c.
        SE_FGG_base_gaussian(&work, &params);
#ifdef THREE_PERIODIC
	SE_FGG_extend_fcn(&work, H_per, &params);
#endif
#ifdef TWO_PERIODIC
	SE2P_FGG_extend_fcn(&work, H_per, &params);
#endif
#ifdef ONE_PERIODIC
	SE1P_FGG_extend_fcn(&work, H_per, &params);
#endif
	SE_FGG_int(phi, &work, &st, &params);
    }

    // done
    SE_FGG_free_workspace(&work);
}
GB_binop__max_int16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__max_int16)
// A.*B function (eWiseMult):       GB (_AemultB_08__max_int16)
// A.*B function (eWiseMult):       GB (_AemultB_02__max_int16)
// A.*B function (eWiseMult):       GB (_AemultB_04__max_int16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__max_int16)
// A*D function (colscale):         GB (_AxD__max_int16)
// D*A function (rowscale):         GB (_DxB__max_int16)
// C+=B function (dense accum):     GB (_Cdense_accumB__max_int16)
// C+=b function (dense accum):     GB (_Cdense_accumb__max_int16)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__max_int16)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__max_int16)
// C=scalar+B                       GB (_bind1st__max_int16)
// C=scalar+B'                      GB (_bind1st_tran__max_int16)
// C=A+scalar                       GB (_bind2nd__max_int16)
// C=A'+scalar                      GB (_bind2nd_tran__max_int16)

// C type:   int16_t
// A type:   int16_t
// A pattern?  0
// B type:   int16_t
// B pattern?  0

// BinaryOp: cij = GB_IMAX (aij, bij)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
// (GBX is the value accessor from GB.h; presumably it handles iso-valued
// matrices via the *_iso flag -- confirm in GB.h)
#define GB_GETA(aij,Ax,pA,A_iso) \
    int16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_IMAX (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MAX || GxB_NO_INT16 || GxB_NO_MAX_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__max_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__max_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__max_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__max_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicated return; harmless generated
    // boilerplate from the Generator/* template.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__max_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__max_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__max_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read in the eWiseUnion case
    int16_t alpha_scalar ;
    int16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__max_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__max_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__max_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__max_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__max_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // B->b if B is bitmap
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t   x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_IMAX (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__max_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t   y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IMAX (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_IMAX (x, aij) ;                \
}

GrB_Info GB (_bind1st_tran__max_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMAX (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__max_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
AB.h
#include "stdio.h" #include "omp.h" #pragma omp declare target to(omp_is_initial_device) #pragma omp declare target void Hello(const char *N, int V = 0); template<int val> struct ABTy { int Dummy = 0; int Val = val; }; // static int AB2 = 2; // static ABTy<4> AB4; #pragma omp end declare target void fAB1(); // static void fAB2() { // #pragma omp target // Hello("fAB2"); // } template<int X> void fAB3() { #pragma omp target Hello("fAB3", X); } // // template<int X> // static void fAB4() { // #pragma omp target // Hello("fAB4", X); // } void a(); void b();
checks.c
#include "helper.h"
#include "computeCellValues.h"
#include "LBDefinitions.h"
#include "checks.h"
#include <omp.h>

/*
 * Scans every fluid cell and warns when its density or velocity leaves the
 * physically plausible range for the lattice-Boltzmann simulation.
 *
 * collideField  distribution functions per cell
 * flagField     cell type flags (FLUID, GAS, ...)
 * length        interior domain size per dimension (ghost layer excluded)
 * t             current timestep, used only in the warning messages
 */
void check_in_rank(float *collideField, int *flagField, int * length, int t){
    int node[D], i, x, y, z;
    /* Domain including the ghost layer (one extra cell on each side). */
    int n[D] = { length[0] + 2, length[1] + 2, length[2] + 2 };
    float velocity[D], density, norm_v=0, *currentCell;

    /* Pragma used to parallelize the loop; each thread needs its own
     * scratch variables, hence the private clause. */
    #pragma omp parallel for private(node, y, x, density, norm_v, currentCell, velocity, i)
    /* For each cell in the cavity.
     * NOTE(review): the loops run over 1..length-1; if the interior spans
     * 1..length the last layer is never checked — confirm intended bounds. */
    for (z = 1; z < length[2]; ++z){
        node[2] = z;
        for (y = 1; y < length[1]; ++y){
            node[1] = y;
            for (x = 1; x < length[0]; ++x){
                node[0] = x;
                /* Only fluid cells carry meaningful macroscopic values. */
                if (*getFlag(flagField, node, n) == FLUID){
                    currentCell = getEl(collideField, node, 0, n);
                    computeDensity(currentCell, &density);
                    computeVelocity(currentCell, &density, velocity);
                    norm_v = 0;
                    /* Squared norm of the velocity. */
                    for (i = 0; i < D; ++i)
                        norm_v += velocity[i]*velocity[i];
                    /* Density should stay close to the reference value 1. */
                    if(density>1.1 || density<0.9)
                        printf("Warning: In timestep %d position %d %d %d is an anormal density of %f \n", t, node[0],node[1],node[2],density);
                    /* |v|^2 must stay below 3*c_s^2 for LBM validity. */
                    if(norm_v>(3 * C_S * C_S))
                        printf("Warning: In timestep %d position %d %d %d is an anormal velocity of %f \n", t, node[0],node[1],node[2],norm_v);
                }
            }
        }
    }
}

/*
 * Warns whenever a cell flagged `flag1` has a lattice neighbor flagged
 * `flag2` (a forbidden adjacency, e.g. FLUID next to GAS).
 */
void check_flags(int * flagField, int* length, int flag1, int flag2, int t){
    int node[D], node2[D], i, j, x, y, z;
    int n[D] = { length[0] + 2, length[1] + 2, length[2] + 2 };

    /* For each cell in the cavity. */
    for (z = 1; z < length[2]; ++z){
        node[2] = z;
        for (y = 1; y < length[1]; ++y){
            node[1] = y;
            for (x = 1; x < length[0]; ++x){
                node[0] = x;
                /* Run the checks only if the flag coincides with flag1. */
                if (*getFlag(flagField, node, n) == flag1){
                    /* For each neighbour cell along the lattice directions. */
                    for (i = 0; i < Q; ++i){
                        for (j = 0; j < D; ++j)
                            node2[j]=node[j]+LATTICEVELOCITIES[i][j];
                        /* BUG FIX: previously tested `node` (the cell itself,
                         * already known to be flag1) instead of the neighbour
                         * `node2`, so the warning could never trigger. */
                        if (*getFlag(flagField, node2, n) == flag2)
                            /* BUG FIX: arguments reordered to match the format
                             * string — flag2 is the "forbiden cell" value and
                             * the trailing coordinates are the neighbour's. */
                            printf("Warning: In timestep %d position %d %d %d a cell %d is adjacent to a forbiden cell %d in %d %d %d \n", t, node[0],node[1],node[2],flag1, flag2, node2[0],node2[1],node2[2]);
                    }
                }
            }
        }
    }
}

/*
 * Sums the mass of all FLUID and INTERFACE cells and prints the total,
 * allowing mass-conservation drift to be monitored over time.
 */
void check_mass(float *massField, int* flagField, int* length, int t){
    float tot_mass = 0;
    int node[D], x, y, z, flag;
    int n[D] = { length[0] + 2, length[1] + 2, length[2] + 2 };

    /* For each cell in the cavity. */
    for (z = 1; z < length[2]; ++z){
        node[2] = z;
        for (y = 1; y < length[1]; ++y){
            node[1] = y;
            for (x = 1; x < length[0]; ++x){
                node[0] = x;
                flag = *getFlag(flagField, node, n);
                /* Only fluid and interface cells carry mass. */
                if (flag == FLUID || flag == INTERFACE) {
                    tot_mass += *getMass(massField, node, n);
                }
            }
        }
    }
    printf("On timestep %d : Total mass: %f \n",t,tot_mass);
}

/* Convenience entry point: runs all consistency checks for timestep t. */
void run_checks(float *collideField, float *massField, int *flagField, int * length, int t ){
    check_in_rank(collideField, flagField, length, t);
    check_flags(flagField, length, FLUID, GAS, t);
    check_mass( massField, flagField, length, t);
}
GB_unaryop__ainv_uint32_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_uint32_uint16
// op(A') function:  GB_tran__ainv_uint32_uint16

// C type:   uint32_t
// A type:   uint16_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = -aij

// A (input) entry type
#define GB_ATYPE \
    uint16_t

// C (output) entry type
#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: additive inverse.  Note the operand is unsigned, so the
// negation wraps modulo 2^32 (well-defined for unsigned types in C).
#define GB_OP(z, x) \
    z = -x ;

// casting: uint16_t input widened to the uint32_t output type
#define GB_CASTING(z, aij) \
    uint32_t z = (uint32_t) aij ;

// cij = op (cast (aij)): one fused load / cast / apply / store
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT32 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies z = -(uint32_t) aij elementwise over all anz entries, in parallel.
GrB_Info GB_unop__ainv_uint32_uint16
(
    uint32_t *Cx,       // Cx and Ax may be aliased
    uint16_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // each iteration is independent, so a static schedule is sufficient
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose kernel lives in GB_unaryop_transpose.c; this wrapper
// binds the type/operator macros above and runs phase 2 of the 2-phase method.
GrB_Info GB_tran__ainv_uint32_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ComputeNonbondedBase2.h
/** *** Copyright (c) 1995, 1996, 1997, 1998, 1999, 2000 by *** The Board of Trustees of the University of Illinois. *** All rights reserved. **/ EXCLUDED( FAST( foo bar ) ) EXCLUDED( MODIFIED( foo bar ) ) EXCLUDED( NORMAL( foo bar ) ) NORMAL( MODIFIED( foo bar ) ) ALCHPAIR( NOT_ALCHPAIR( foo bar ) ) ALCHPAIR( // get alchemical nonbonded scaling parameters (once per pairlist) myLambda = ALCH1(lambdaUp) ALCH2(lambdaDown) ALCH3(lambdaUp) ALCH4(lambdaDown); FEP(myLambda2 = ALCH1(lambda2Up) ALCH2(lambda2Down) ALCH3(lambda2Up) ALCH4(lambda2Down);) myElecLambda = ALCH1(elecLambdaUp) ALCH2(elecLambdaDown) ALCH3(elecLambdaUp) ALCH4(elecLambdaDown); FEP(myElecLambda2 = ALCH1(elecLambda2Up) ALCH2(elecLambda2Down) ALCH3(elecLambda2Up) ALCH4(elecLambda2Down);) myVdwLambda = ALCH1(vdwLambdaUp) ALCH2(vdwLambdaDown) ALCH3(vdwLambdaUp) ALCH4(vdwLambdaDown); FEP(myVdwLambda2 = ALCH1(vdwLambda2Up) ALCH2(vdwLambda2Down) ALCH3(vdwLambda2Up) ALCH4(vdwLambda2Down);) ALCH1(myRepLambda = repLambdaUp) ALCH2(myRepLambda = repLambdaDown); FEP(ALCH1(myRepLambda2 = repLambda2Up) ALCH2(myRepLambda2 = repLambda2Down);) ALCH1(myVdwShift = vdwShiftUp) ALCH2(myVdwShift = vdwShiftDown); FEP(ALCH1(myVdwShift2 = vdwShift2Up) ALCH2(myVdwShift2 = vdwShift2Down);) ) #ifdef A2_QPX #if ( SHORT(1+) 0 ) NORMAL(kq_iv = vec_splats(kq_i); ) MODIFIED(kq_iv = vec_splats((1.0-modf_mod) *kq_i); ) #endif #if ( FULL( 1+ ) 0 ) EXCLUDED( SHORT( full_cnst = (vector4double)(6., 4., 2., 1.); ) NOSHORT( full_cnst = (vector4double)(1., 1., 1., 1.); ) ) MODIFIED( SHORT( full_cnst = (vector4double)(6., 4., 2., 1.); full_cnst = vec_mul (full_cnst, vec_splats(modf_mod)); ) NOSHORT( full_cnst = vec_splats(modf_mod); ) ) #endif #endif #ifdef ARCH_POWERPC __alignx(64, table_four); __alignx(32, p_1); #pragma unroll(1) #pragma ibm independent_loop #endif #ifndef ARCH_POWERPC #pragma ivdep #endif #if ( FULL( EXCLUDED( SHORT( 1+ ) ) ) 0 ) // avoid bug in Intel 15.0 compiler #pragma novector #else #ifdef PRAGMA_SIMD #ifndef 
TABENERGYFLAG #ifndef GOFORCES #pragma omp simd SHORT(FAST(reduction(+:f_i_x,f_i_y,f_i_z)) ENERGY(FAST(reduction(+:vdwEnergy) SHORT(reduction(+:electEnergy))))) \ FULL(reduction(+:fullf_i_x,fullf_i_y,fullf_i_z) ENERGY(reduction(+:fullElectEnergy))) #endif #endif #pragma loop_count avg=100 #else // PRAGMA_SIMD #pragma loop_count avg=4 #endif // PRAGMA_SIMD #endif for (k=0; k<npairi; ++k) { TABENERGY( const int numtypes = simParams->tableNumTypes; const float table_spacing = simParams->tableSpacing; const int npertype = (int) (namdnearbyint(simParams->tableMaxDist / simParams->tableSpacing) + 1); ) int table_i = (r2iilist[2*k] >> 14) + r2_delta_expc; // table_i >= 0 const int j = pairlisti[k]; //register const CompAtom *p_j = p_1 + j; #define p_j (p_1+j) #ifdef A2_QPX register double *p_j_d = (double *) p_j; #endif // const CompAtomExt *pExt_j = pExt_1 + j; BigReal diffa = r2list[k] - r2_table[table_i]; //const BigReal* const table_four_i = table_four + 16*table_i; #define table_four_i (table_four + 16*table_i) #if ( FAST( 1 + ) TABENERGY( 1 + ) 0 ) // FAST or TABENERGY //const LJTable::TableEntry * lj_pars = // lj_row + 2 * p_j->vdwType MODIFIED(+ 1); const int lj_index = 2 * p_j->vdwType MODIFIED(+ 1); #define lj_pars (lj_row+lj_index) #ifdef A2_QPX double *lj_pars_d = (double *) lj_pars; #endif #endif TABENERGY( register const int tabtype = -1 - ( lj_pars->A < 0 ? 
lj_pars->A : 0 ); ) #if ( SHORT( FAST( 1+ ) ) 0 ) //Force *f_j = f_1 + j; #define f_j (f_1+j) #endif #if ( FULL( 1+ ) 0 ) //Force *fullf_j = fullf_1 + j; #define fullf_j (fullf_1+j) #endif //Power PC aliasing and alignment constraints #ifdef ARCH_POWERPC #if ( FULL( 1+ ) 0 ) #pragma disjoint (*table_four, *fullf_1) #pragma disjoint (*p_1, *fullf_1) #ifdef A2_QPX #pragma disjoint (*p_j_d, *fullf_1) #endif #pragma disjoint (*r2_table, *fullf_1) #pragma disjoint (*r2list, *fullf_1) #if ( SHORT( FAST( 1+ ) ) 0 ) #pragma disjoint (*f_1 , *fullf_1) #pragma disjoint (*fullf_1, *f_1) #endif //Short + fast #endif //Full #if ( SHORT( FAST( 1+ ) ) 0 ) #pragma disjoint (*table_four, *f_1) #pragma disjoint (*p_1, *f_1) #pragma disjoint (*r2_table, *f_1) #pragma disjoint (*r2list, *f_1) #pragma disjoint (*lj_row, *f_1) #ifdef A2_QPX #pragma disjoint (*p_j_d, *f_1) #endif #endif //Short + Fast __alignx(64, table_four_i); FAST ( __alignx(32, lj_pars); ) __alignx(32, p_j); #endif //ARCH_POWERPC /* BigReal modf = 0.0; int atom2 = p_j->id; register char excl_flag = ( (atom2 >= excl_min && atom2 <= excl_max) ? 
excl_flags[atom2-excl_min] : 0 ); if ( excl_flag ) { ++exclChecksum; } SELF( if ( j < j_hgroup ) { excl_flag = EXCHCK_FULL; } ) if ( excl_flag ) { if ( excl_flag == EXCHCK_FULL ) { lj_pars = lj_null_pars; modf = 1.0; } else { ++lj_pars; modf = modf_mod; } } */ BigReal kqq = kq_i * p_j->charge; #ifdef A2_QPX float * cg = (float *)&p_j->charge; #if ( FULL( 1+ ) 0 ) #pragma disjoint (*cg, *fullf_1) #endif //Full #if ( SHORT( FAST( 1+ ) ) 0 ) #pragma disjoint (*cg, *f_1) #endif //Short + fast #endif LES( BigReal lambda_pair = lambda_table_i[p_j->partition]; ) #ifndef A2_QPX register const BigReal p_ij_x = p_i_x - p_j->position.x; register const BigReal p_ij_y = p_i_y - p_j->position.y; register const BigReal p_ij_z = p_i_z - p_j->position.z; #else vector4double charge_v = vec_lds(0, cg); vector4double kqqv = vec_mul(kq_iv, charge_v ); vector4double p_ij_v = vec_sub(p_i_v, vec_ld (0, p_j_d)); #define p_ij_x vec_extract(p_i_v, 0) #define p_ij_y vec_extract(p_i_v, 1) #define p_ij_z vec_extract(p_i_v, 2) #endif #if ( FAST(1+) 0 ) const BigReal A = scaling * lj_pars->A; const BigReal B = scaling * lj_pars->B; #ifndef A2_QPX BigReal vdw_d = A * table_four_i[0] - B * table_four_i[4]; BigReal vdw_c = A * table_four_i[1] - B * table_four_i[5]; BigReal vdw_b = A * table_four_i[2] - B * table_four_i[6]; BigReal vdw_a = A * table_four_i[3] - B * table_four_i[7]; #else const vector4double Av = vec_mul(scalingv, vec_lds(0, lj_pars_d)); const vector4double Bv = vec_mul(scalingv, vec_lds(8, lj_pars_d)); vector4double vdw_v = vec_msub( Av, vec_ld(0, (BigReal*)table_four_i), vec_mul(Bv, vec_ld(4*sizeof(BigReal), (BigReal*)table_four_i)) ); #define vdw_d vec_extract(vdw_v, 0) #define vdw_c vec_extract(vdw_v, 1) #define vdw_b vec_extract(vdw_v, 2) #define vdw_a vec_extract(vdw_v, 3) #endif ALCHPAIR ( // Alchemical free energy calculation // Pairlist 1 and 2 are for softcore atoms, while 3 and 4 are single topology atoms. 
// Pairlists are separated so that lambda-coupled pairs are handled // independently from normal nonbonded (inside ALCHPAIR macro). // The separation-shifted van der Waals potential and a shifted // electrostatics potential for decoupling are calculated explicitly. // Would be faster with lookup tables but because only a small minority // of nonbonded pairs are lambda-coupled the impact is minimal. // Explicit calculation also makes things easier to modify. // These are now inline functions (in ComputeNonbondedFep.C) to // tidy the code const BigReal r2 = r2list[k] - r2_delta; // These are now inline functions (in ComputeNonbondedFep.C) to // tidy the code FEP( ALCH1 ( // Don't merge/recombine the ALCH 1, 2, 3 ,4. Their functions might be modified for future algorithm changes. fep_vdw_forceandenergies(A, B, r2, myVdwShift, myVdwShift2, switchdist2, cutoff2, switchfactor, vdwForceSwitching, myVdwLambda, myVdwLambda2, alchWCAOn, myRepLambda, myRepLambda2, &alch_vdw_energy, &alch_vdw_force, &alch_vdw_energy_2);) ALCH2 ( fep_vdw_forceandenergies(A, B, r2, myVdwShift, myVdwShift2, switchdist2, cutoff2, switchfactor, vdwForceSwitching, myVdwLambda, myVdwLambda2, alchWCAOn, myRepLambda, myRepLambda2, &alch_vdw_energy, &alch_vdw_force, &alch_vdw_energy_2);) ALCH3 ( // In single topology region ALCH3 & 4, all atoms are paired so softcore potential is unnecessary. 
ENERGY(alch_vdw_energy = -myVdwLambda * (( ( diffa * vdw_d * (1/6.)+ vdw_c * (1/4.)) * diffa + vdw_b *(1/2.)) * diffa + vdw_a);) alch_vdw_energy_2 = -myVdwLambda2 * (( ( diffa * vdw_d * (1/6.)+ vdw_c * (1/4.)) * diffa + vdw_b *(1/2.)) * diffa + vdw_a); alch_vdw_force = myVdwLambda * ((diffa * vdw_d + vdw_c) * diffa + vdw_b);) ALCH4 ( ENERGY(alch_vdw_energy = -myVdwLambda * (( ( diffa * vdw_d * (1/6.)+ vdw_c * (1/4.)) * diffa + vdw_b *(1/2.)) * diffa + vdw_a);) alch_vdw_energy_2 = -myVdwLambda2 * (( ( diffa * vdw_d * (1/6.)+ vdw_c * (1/4.)) * diffa + vdw_b *(1/2.)) * diffa + vdw_a); alch_vdw_force = myVdwLambda * ((diffa * vdw_d + vdw_c) * diffa + vdw_b);) ) TI(ti_vdw_force_energy_dUdl(A, B, r2, myVdwShift, switchdist2, cutoff2, switchfactor, vdwForceSwitching, myVdwLambda, alchVdwShiftCoeff, alchWCAOn, myRepLambda, &alch_vdw_energy, &alch_vdw_force, &alch_vdw_dUdl);) ) //NOT_ALCHPAIR( //TABENERGY( #if (NOT_ALCHPAIR(1+) 0) #if (TABENERGY(1+) 0) if (tabtype >= 0) { register BigReal r1; r1 = sqrt(p_ij_x*p_ij_x + p_ij_y*p_ij_y + p_ij_z*p_ij_z); //CkPrintf("%i %i %f %f %i\n", npertype, tabtype, r1, table_spacing, (int) (namdnearbyint(r1 / table_spacing))); register int eneraddress; eneraddress = 2 * ((npertype * tabtype) + ((int) namdnearbyint(r1 / table_spacing))); //CkPrintf("Using distance bin %i for distance %f\n", eneraddress, r1); #ifndef A2_QPX vdw_d = 0.; vdw_c = 0.; vdw_b = table_ener[eneraddress + 1] / r1; vdw_a = (-1/2.) * diffa * vdw_b; #else vec_insert(0., vdw_v, 0); vec_insert(0., vdw_v, 1); vec_insert(table_ener[eneraddress + 1] / r1, vdw_v, 2); vec_insert((-1/2.) 
* diffa * vdw_b, vdw_v, 3); #endif ENERGY( register BigReal vdw_val = table_ener[eneraddress]; //CkPrintf("Found vdw energy of %f\n", vdw_val); vdwEnergy += LAM(lambda_pair *) vdw_val; FEP( vdwEnergy_s += d_lambda_pair * vdw_val; ) ) } else { //) #endif ENERGY( register BigReal vdw_val = ( ( diffa * vdw_d * (1/6.)+ vdw_c * (1/4.)) * diffa + vdw_b *(1/2.)) * diffa + vdw_a; vdwEnergy -= LAM(lambda_pair *) vdw_val; FEP(vdwEnergy_s -= vdw_val;) ) //TABENERGY( } ) /* endif (tabtype >= 0) */ #if (TABENERGY (1+) 0) } #endif //) // NOT_ALCHPAIR #endif ALCHPAIR( ENERGY(vdwEnergy += alch_vdw_energy;) FEP(vdwEnergy_s += alch_vdw_energy_2;) TI(ALCH1(vdwEnergy_ti_1 += alch_vdw_dUdl;) ALCH2(vdwEnergy_ti_2 += alch_vdw_dUdl;)) ) // ALCHPAIR #endif // FAST #if ( FAST(1+) 0 ) INT( register BigReal vdw_dir; vdw_dir = ( diffa * vdw_d + vdw_c ) * diffa + vdw_b; //BigReal force_r = LAM(lambda_pair *) vdw_dir; reduction[pairVDWForceIndex_X] += force_sign * vdw_dir * p_ij_x; reduction[pairVDWForceIndex_Y] += force_sign * vdw_dir * p_ij_y; reduction[pairVDWForceIndex_Z] += force_sign * vdw_dir * p_ij_z; ) #if ( SHORT(1+) 0 ) // Short-range electrostatics #ifndef A2_QPX NORMAL( BigReal fast_d = kqq * table_four_i[8]; BigReal fast_c = kqq * table_four_i[9]; BigReal fast_b = kqq * table_four_i[10]; BigReal fast_a = kqq * table_four_i[11]; ) MODIFIED( BigReal modfckqq = (1.0-modf_mod) * kqq; BigReal fast_d = modfckqq * table_four_i[8]; BigReal fast_c = modfckqq * table_four_i[9]; BigReal fast_b = modfckqq * table_four_i[10]; BigReal fast_a = modfckqq * table_four_i[11]; ) #else vector4double fastv = vec_mul(kqqv, vec_ld(8 * sizeof(BigReal), (BigReal*)table_four_i)); #define fast_d vec_extract(fastv, 0) #define fast_c vec_extract(fastv, 1) #define fast_b vec_extract(fastv, 2) #define fast_a vec_extract(fastv, 3) #endif { ENERGY( register BigReal fast_val = ( ( diffa * fast_d * (1/6.)+ fast_c * (1/4.)) * diffa + fast_b *(1/2.)) * diffa + fast_a; NOT_ALCHPAIR ( electEnergy -= LAM(lambda_pair *) 
fast_val; FEP(electEnergy_s -= fast_val;) ) ) //ENERGY ALCHPAIR( ENERGY(electEnergy -= myElecLambda * fast_val;) FEP(electEnergy_s -= myElecLambda2 * fast_val;) TI( NOENERGY(register BigReal fast_val = ( ( diffa * fast_d * (1/6.)+ fast_c * (1/4.)) * diffa + fast_b *(1/2.)) * diffa + fast_a;) ALCH1(electEnergy_ti_1 -= fast_val;) ALCH2(electEnergy_ti_2 -= fast_val;) ) ) INT( register BigReal fast_dir = ( diffa * fast_d + fast_c ) * diffa + fast_b; // force_r -= -1.0 * LAM(lambda_pair *) fast_dir; reduction[pairElectForceIndex_X] += force_sign * fast_dir * p_ij_x; reduction[pairElectForceIndex_Y] += force_sign * fast_dir * p_ij_y; reduction[pairElectForceIndex_Z] += force_sign * fast_dir * p_ij_z; ) } /***** JE - Go *****/ // Now Go energy should appear in VDW place -- put vdw_b back into place #if ( NORMAL (1+) 0) #if ( GO (1+) 0) // JLai #ifndef CODE_REDUNDANT #define CODE_REDUNDANT 0 #endif #if CODE_REDUNDANT if (ComputeNonbondedUtil::goGroPair) { // Explicit goGroPair calculation; only calculates goGroPair if goGroPair is turned on // // get_gro_force has an internal checklist that sees if atom_i and atom_j are // in the explicit pairlist. This is done because there is no guarantee that a // processor will have atom_i and atom_j so we cannot loop over the explict atom pairs. // We can only loop over all pairs. // // NOTE: It does not look like fast_b is not normalized by the r vector. // // JLai BigReal groLJe = 0.0; BigReal groGausse = 0.0; const CompAtomExt *pExt_z = pExt_1 + j; BigReal groForce = mol->get_gro_force2(p_ij_x, p_ij_y, p_ij_z,pExt_i.id,pExt_z->id,&groLJe,&groGausse); NAMD_die("Failsafe. 
This line should never be reached\n"); #ifndef A2_QPX fast_b += groForce; #else vec_insert(fast_b + groForce, fastv, 2); #endif ENERGY( NOT_ALCHPAIR ( // JLai groLJEnergy += groLJe; groGaussEnergy += groGausse; ) ) //ENERGY } #endif BigReal goNative = 0; BigReal goNonnative = 0; BigReal goForce = 0; register const CompAtomExt *pExt_j = pExt_1 + j; if (ComputeNonbondedUtil::goMethod == 2) { goForce = mol->get_go_force2(p_ij_x, p_ij_y, p_ij_z, pExt_i.id, pExt_j->id,&goNative,&goNonnative); } else { // Ported by JLai -- JE - added ( const BigReal r2go = square(p_ij_x, p_ij_y, p_ij_z); const BigReal rgo = sqrt(r2go); if (ComputeNonbondedUtil::goMethod == 1) { goForce = mol->get_go_force(rgo, pExt_i.id, pExt_j->id, &goNative, &goNonnative); } else if (ComputeNonbondedUtil::goMethod == 3) { goForce = mol->get_go_force_new(rgo, pExt_i.id, pExt_j->id, &goNative, &goNonnative); } else { NAMD_die("I SHOULDN'T BE HERE. DYING MELODRAMATICALLY.\n"); } } #ifndef A2_QPX fast_b += goForce; #else vec_insert(fast_b + goForce, fastv, 2); #endif { ENERGY( NOT_ALCHPAIR ( // JLai goEnergyNative += goNative; goEnergyNonnative += goNonnative; ) ) //ENERGY INT( reduction[pairVDWForceIndex_X] += force_sign * goForce * p_ij_x; reduction[pairVDWForceIndex_Y] += force_sign * goForce * p_ij_y; reduction[pairVDWForceIndex_Z] += force_sign * goForce * p_ij_z; ) } // End of INT //DebugM(3,"rgo:" << rgo << ", pExt_i.id:" << pExt_i.id << ", pExt_j->id:" << pExt_j->id << \ // ", goForce:" << goForce << ", fast_b:" << fast_b << std::endl); #endif // ) // End of GO macro /***** JE - End Go *****/ // End of port JL #endif //) // End of Normal MACRO // Combined short-range electrostatics and VdW force: #if ( NOT_ALCHPAIR(1+) 0) #ifndef A2_QPX fast_d += vdw_d; fast_c += vdw_c; fast_b += vdw_b; fast_a += vdw_a; // not used! 
#else fastv = vec_add(fastv, vdw_v); #endif #endif register BigReal fast_dir = (diffa * fast_d + fast_c) * diffa + fast_b; BigReal force_r = LAM(lambda_pair *) fast_dir; ALCHPAIR( force_r *= myElecLambda; force_r += alch_vdw_force; // special ALCH forces already multiplied by relevant lambda ) #ifndef NAMD_CUDA #ifndef A2_QPX register BigReal tmp_x = force_r * p_ij_x; f_i_x += tmp_x; f_j->x -= tmp_x; register BigReal tmp_y = force_r * p_ij_y; f_i_y += tmp_y; f_j->y -= tmp_y; register BigReal tmp_z = force_r * p_ij_z; f_i_z += tmp_z; f_j->z -= tmp_z; #else vector4double force_rv = vec_splats (force_r); vector4double tmp_v = vec_mul(force_rv, p_ij_v); f_i_v = vec_add(f_i_v, tmp_v); #define tmp_x vec_extract(tmp_v, 0) #define tmp_y vec_extract(tmp_v, 1) #define tmp_z vec_extract(tmp_v, 2) f_j->x -= tmp_x; f_j->y -= tmp_y; f_j->z -= tmp_z; #endif PPROF( const BigReal p_j_z = p_j->position.z; int n2 = (int)floor((p_j_z-pressureProfileMin)*invThickness); pp_clamp(n2, pressureProfileSlabs); int p_j_partition = p_j->partition; pp_reduction(pressureProfileSlabs, n1, n2, p_i_partition, p_j_partition, pressureProfileAtomTypes, tmp_x*p_ij_x, tmp_y * p_ij_y, tmp_z*p_ij_z, pressureProfileReduction); ) #endif #endif // SHORT #endif // FAST #if ( FULL (EXCLUDED( SHORT ( 1+ ) ) ) 0 ) //const BigReal* const slow_i = slow_table + 4*table_i; #define slow_i (slow_table + 4*table_i) #ifdef ARCH_POWERPC //Alignment and aliasing constraints __alignx (32, slow_table); #if ( SHORT( FAST( 1+ ) ) 0 ) #pragma disjoint (*slow_table, *f_1) #endif #pragma disjoint (*slow_table, *fullf_1) #endif //ARCH_POWERPC #endif //FULL #if ( FULL (MODIFIED( SHORT ( 1+ ) ) ) 0 ) //const BigReal* const slow_i = slow_table + 4*table_i; #define slow_i (slow_table + 4*table_i) #ifdef ARCH_POWERPC //Alignment and aliasing constraints __alignx (32, slow_table); #if ( SHORT( FAST( 1+ ) ) 0 ) #pragma disjoint (*slow_table, *f_1) #endif #pragma disjoint (*slow_table, *fullf_1) #endif //ARCH_POWERPC #endif //FULL #if ( 
FULL( 1+ ) 0 ) #ifndef A2_QPX BigReal slow_d = table_four_i[8 SHORT(+ 4)]; BigReal slow_c = table_four_i[9 SHORT(+ 4)]; BigReal slow_b = table_four_i[10 SHORT(+ 4)]; BigReal slow_a = table_four_i[11 SHORT(+ 4)]; EXCLUDED( SHORT( slow_a += slow_i[3]; slow_b += 2.*slow_i[2]; slow_c += 4.*slow_i[1]; slow_d += 6.*slow_i[0]; ) NOSHORT( slow_d -= table_four_i[12]; slow_c -= table_four_i[13]; slow_b -= table_four_i[14]; slow_a -= table_four_i[15]; ) ) MODIFIED( SHORT( slow_a += modf_mod * slow_i[3]; slow_b += 2.*modf_mod * slow_i[2]; slow_c += 4.*modf_mod * slow_i[1]; slow_d += 6.*modf_mod * slow_i[0]; ) NOSHORT( slow_d -= modf_mod * table_four_i[12]; slow_c -= modf_mod * table_four_i[13]; slow_b -= modf_mod * table_four_i[14]; slow_a -= modf_mod * table_four_i[15]; ) ) slow_d *= kqq; slow_c *= kqq; slow_b *= kqq; slow_a *= kqq; #else vector4double slow_v = vec_ld((8 SHORT(+ 4)) * sizeof(BigReal), (BigReal*)table_four_i); EXCLUDED( SHORT( slow_v = vec_madd(full_cnst, vec_ld(0, (BigReal*)slow_i), slow_v); ) NOSHORT( slow_v = vec_sub(slow_v, vec_ld(12*sizeof(BigReal), (BigReal*)table_four_i)); ) ); MODIFIED( SHORT( slow_v = vec_madd(full_cnst, vec_ld(0, (BigReal*)slow_i), slow_v); ) NOSHORT( slow_v = vec_nmsub(full_cnst, vec_ld(12*sizeof(BigReal), (BigReal*)table_four_i), slow_v); ) ); slow_v = vec_mul (slow_v, vec_splats(kqq)); #define slow_d vec_extract(slow_v, 0) #define slow_c vec_extract(slow_v, 1) #define slow_b vec_extract(slow_v, 2) #define slow_a vec_extract(slow_v, 3) #endif ENERGY( register BigReal slow_val = ( ( diffa * slow_d *(1/6.)+ slow_c * (1/4.)) * diffa + slow_b *(1/2.)) * diffa + slow_a; NOT_ALCHPAIR ( fullElectEnergy -= LAM(lambda_pair *) slow_val; FEP(fullElectEnergy_s -= slow_val;) ) ) // ENERGY ALCHPAIR( ENERGY(fullElectEnergy -= myElecLambda * slow_val;) FEP(fullElectEnergy_s -= myElecLambda2 * slow_val;) TI( NOENERGY(register BigReal slow_val = ( ( diffa * slow_d *(1/6.)+ slow_c * (1/4.)) * diffa + slow_b *(1/2.)) * diffa + slow_a;) 
ALCH1(fullElectEnergy_ti_1 -= slow_val;) ALCH2(fullElectEnergy_ti_2 -= slow_val;) ) ) INT( { register BigReal slow_dir = ( diffa * slow_d + slow_c ) * diffa + slow_b; reduction[pairElectForceIndex_X] += force_sign * slow_dir * p_ij_x; reduction[pairElectForceIndex_Y] += force_sign * slow_dir * p_ij_y; reduction[pairElectForceIndex_Z] += force_sign * slow_dir * p_ij_z; } ) #if (NOT_ALCHPAIR (1+) 0) #if (FAST(1+) 0) #if (NOSHORT(1+) 0) #ifndef A2_QPX slow_d += vdw_d; slow_c += vdw_c; slow_b += vdw_b; slow_a += vdw_a; // unused! #else slow_v = vec_add (slow_v, vdw_v); #endif #endif #endif #endif register BigReal slow_dir = (diffa * slow_d + slow_c) * diffa + slow_b; BigReal fullforce_r = slow_dir LAM(* lambda_pair); ALCHPAIR ( fullforce_r *= myElecLambda; FAST( NOSHORT( fullforce_r += alch_vdw_force; )) ) #ifndef NAMD_CUDA { #ifndef A2_QPX register BigReal ftmp_x = fullforce_r * p_ij_x; fullf_i_x += ftmp_x; fullf_j->x -= ftmp_x; register BigReal ftmp_y = fullforce_r * p_ij_y; fullf_i_y += ftmp_y; fullf_j->y -= ftmp_y; register BigReal ftmp_z = fullforce_r * p_ij_z; fullf_i_z += ftmp_z; fullf_j->z -= ftmp_z; #else vector4double fforce_rv = vec_splats (fullforce_r); vector4double ftmp_v = vec_mul(fforce_rv, p_ij_v); fullf_i_v = vec_add(fullf_i_v, ftmp_v); #define ftmp_x vec_extract(ftmp_v, 0) #define ftmp_y vec_extract(ftmp_v, 1) #define ftmp_z vec_extract(ftmp_v, 2) fullf_j->x -= ftmp_x; fullf_j->y -= ftmp_y; fullf_j->z -= ftmp_z; #endif PPROF( const BigReal p_j_z = p_j->position.z; int n2 = (int)floor((p_j_z-pressureProfileMin)*invThickness); pp_clamp(n2, pressureProfileSlabs); int p_j_partition = p_j->partition; pp_reduction(pressureProfileSlabs, n1, n2, p_i_partition, p_j_partition, pressureProfileAtomTypes, ftmp_x*p_ij_x, ftmp_y * p_ij_y, ftmp_z*p_ij_z, pressureProfileReduction); ) } #endif #endif //FULL } // for pairlist #undef p_j #undef lj_pars #undef table_four_i #undef slow_i #undef f_j #undef fullf_j
quantize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE % % Q Q U U A A NN N T I ZZ E % % Q Q U U AAAAA N N N T I ZZZ EEEEE % % Q QQ U U A A N NN T I ZZ E % % QQQQ UUU A A N N T IIIII ZZZZZ EEEEE % % % % % % MagickCore Methods to Reduce the Number of Unique Colors in an Image % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Realism in computer graphics typically requires using 24 bits/pixel to % generate an image. Yet many graphic display devices do not contain the % amount of memory necessary to match the spatial and color resolution of % the human eye. The Quantize methods takes a 24 bit image and reduces % the number of colors so it can be displayed on raster device with less % bits per pixel. In most instances, the quantized image closely % resembles the original reference image. % % A reduction of colors in an image is also desirable for image % transmission and real-time animation. % % QuantizeImage() takes a standard RGB or monochrome images and quantizes % them down to some fixed number of colors. % % For purposes of color allocation, an image is a set of n pixels, where % each pixel is a point in RGB space. 
RGB space is a 3-dimensional % vector space, and each pixel, Pi, is defined by an ordered triple of % red, green, and blue coordinates, (Ri, Gi, Bi). % % Each primary color component (red, green, or blue) represents an % intensity which varies linearly from 0 to a maximum value, Cmax, which % corresponds to full saturation of that color. Color allocation is % defined over a domain consisting of the cube in RGB space with opposite % vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax = % 255. % % The algorithm maps this domain onto a tree in which each node % represents a cube within that domain. In the following discussion % these cubes are defined by the coordinate of two opposite vertices: % The vertex nearest the origin in RGB space and the vertex farthest from % the origin. % % The tree's root node represents the entire domain, (0,0,0) through % (Cmax,Cmax,Cmax). Each lower level in the tree is generated by % subdividing one node's cube into eight smaller cubes of equal size. % This corresponds to bisecting the parent cube with planes passing % through the midpoints of each edge. % % The basic algorithm operates in three phases: Classification, % Reduction, and Assignment. Classification builds a color description % tree for the image. Reduction collapses the tree until the number it % represents, at most, the number of colors desired in the output image. % Assignment defines the output image's color map and sets each pixel's % color by restorage_class in the reduced tree. Our goal is to minimize % the numerical discrepancies between the original colors and quantized % colors (quantization error). % % Classification begins by initializing a color description tree of % sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color description % tree in the storage_class phase for realistic values of Cmax. 
If % colors components in the input image are quantized to k-bit precision, % so that Cmax= 2k-1, the tree would need k levels below the root node to % allow representing each possible input color in a leaf. This becomes % prohibitive because the tree's total number of nodes is 1 + % sum(i=1, k, 8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing the pixel's color. It updates the following data for each % such node: % % n1: Number of pixels whose color is contained in the RGB cube which % this node represents; % % n2: Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb: Sums of the red, green, and blue component values for all % pixels not classified at a lower depth. The combination of these sums % and n2 will ultimately characterize the mean color of a set of % pixels represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the % quantization error for a node. % % Reduction repeatedly prunes the tree until the number of nodes with n2 % > 0 is less than or equal to the maximum number of colors allowed in % the output image. On any given iteration over the tree, it selects % those nodes whose E count is minimal for pruning and merges their color % statistics upward. 
It uses a pruning threshold, Ep, to govern node % selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors within % the cubic volume which the node represents. This includes n1 - n2 % pixels whose colors should be defined by nodes at a lower level in the % tree. % % Assignment generates the output image from the pruned tree. The output % image consists of two parts: (1) A color map, which is an array of % color descriptions (RGB triples) for each color present in the output % image; (2) A pixel array, which represents each pixel as an index % into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. 
% % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % This method is based on a similar algorithm written by Paul Raveling. % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/histogram.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/string_.h" #include "magick/thread-private.h" /* Define declarations. */ #if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE) #define CacheShift 2 #else #define CacheShift 3 #endif #define ErrorQueueLength 16 #define MaxNodes 266817 #define MaxTreeDepth 8 #define NodesInAList 1920 /* Typdef declarations. 
*/

/*
  A pixel in floating-point precision; used so classification sums and
  error terms do not overflow or truncate the integer Quantum type.
*/
typedef struct _RealPixelPacket
{
  MagickRealType
    red,
    green,
    blue,
    opacity;
} RealPixelPacket;

/*
  One node of the color description tree.  Each node represents a cube in
  RGB(A) space; 16 children cover the 3-bit RGB split plus an optional
  4th (alpha) bit — only 8 are used when no alpha channel participates.
*/
typedef struct _NodeInfo
{
  struct _NodeInfo
    *parent,
    *child[16];

  MagickSizeType
    number_unique;      /* n2: pixels classified exactly at this node */

  RealPixelPacket
    total_color;        /* Sr/Sg/Sb(/Sa) sums for pixels at this node */

  MagickRealType
    quantize_error;     /* E: accumulated squared distance to node center */

  size_t
    color_number,       /* colormap slot assigned during DefineImageColormap */
    id,                 /* which child of the parent this node is */
    level;              /* depth in the tree (root = 0) */
} NodeInfo;

/*
  Simple singly-linked list of node arrays, allocated NodesInAList at a
  time to amortize allocation cost.
*/
typedef struct _Nodes
{
  NodeInfo
    *nodes;

  struct _Nodes
    *next;
} Nodes;

/*
  Working state for one quantization run: the color tree, pruning
  thresholds, closest-color search target, dither error queue, and
  traversal bookkeeping.
*/
typedef struct _CubeInfo
{
  NodeInfo
    *root;

  size_t
    colors,             /* colors currently represented by the tree */
    maximum_colors;     /* requested output colormap size */

  ssize_t
    transparent_index;

  MagickSizeType
    transparent_pixels;

  RealPixelPacket
    target;             /* color being matched by ClosestColor() */

  MagickRealType
    distance,           /* best (smallest) distance found so far */
    pruning_threshold,
    next_threshold;

  size_t
    nodes,
    free_nodes,
    color_number;

  NodeInfo
    *next_node;

  Nodes
    *node_queue;

  ssize_t
    *cache;

  RealPixelPacket
    error[ErrorQueueLength];    /* recent dither error terms */

  MagickRealType
    weights[ErrorQueueLength];

  QuantizeInfo
    *quantize_info;

  MagickBooleanType
    associate_alpha;    /* treat opacity as a 4th tree dimension */

  ssize_t
    x,
    y;

  size_t
    depth;              /* current maximum tree depth (may shrink on prune) */

  MagickOffsetType
    offset;

  MagickSizeType
    span;
} CubeInfo;

/*
  Method prototypes.
*/
static CubeInfo
  *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);

static NodeInfo
  *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);

static MagickBooleanType
  AssignImageColors(Image *,CubeInfo *),
  ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
  DitherImage(Image *,CubeInfo *),
  SetGrayscaleImage(Image *);

static size_t
  DefineImageColormap(Image *,CubeInfo *,NodeInfo *);

static void
  ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
  DestroyCubeInfo(CubeInfo *),
  PruneLevel(const Image *,CubeInfo *,const NodeInfo *),
  PruneToCubeDepth(const Image *,CubeInfo *,const NodeInfo *),
  ReduceImageColors(const Image *,CubeInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e Q u a n t i z e I n f o                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
%  The format of the AcquireQuantizeInfo method is:
%
%      QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  QuantizeInfo
    *quantize_info;

  const char
    *option;

  /*
    Allocate and default-initialize; allocation failure is fatal, matching
    the other MagickCore acquire methods.
  */
  quantize_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*quantize_info));
  if (quantize_info == (QuantizeInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetQuantizeInfo(quantize_info);
  if (image_info == (ImageInfo *) NULL)
    return(quantize_info);
  /*
    Seed the quantize options from the image info: the dither flag, the
    optional "dither" image option selecting a dither method, and verbose
    mode enabling quantization-error measurement.
  */
  quantize_info->dither=image_info->dither;
  option=GetImageOption(image_info,"dither");
  if (option != (const char *) NULL)
    quantize_info->dither_method=(DitherMethod) ParseCommandOption(
      MagickDitherOptions,MagickFalse,option);
  quantize_info->measure_error=image_info->verbose;
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   A s s i g n I m a g e C o l o r s                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AssignImageColors() generates the output image from the pruned tree.  The
%  output image consists of two parts: (1) a color map, which is an array of
%  color descriptions (RGB triples) for each color present in the output
%  image; (2) a pixel array, which represents each pixel as an index into
%  the color map array.
%
%  First, the assignment phase makes one pass over the pruned color
%  description tree to establish the image's color map.  For each node with
%  n2 > 0, it divides Sr, Sg, and Sb by n2.  This produces the mean color of
%  all pixels that classify no lower than this node.  Each of these colors
%  becomes an entry in the color map.
%
%  Finally, the assignment phase reclassifies each pixel in the pruned tree
%  to identify the deepest node containing the pixel's color.
The pixel's value in the pixel array becomes the index of this node's
%  mean color in the color map.
%
%  The format of the AssignImageColors() method is:
%
%      MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
*/

/*
  Copy a pixel into floating-point form, premultiplying the color channels
  by alpha when the cube associates an alpha channel (fully opaque pixels
  skip the multiply).  The opacity channel itself is copied unscaled.
*/
static inline void AssociateAlphaPixel(const CubeInfo *cube_info,
  const PixelPacket *pixel,RealPixelPacket *alpha_pixel)
{
  MagickRealType
    alpha;

  if ((cube_info->associate_alpha == MagickFalse) ||
      (pixel->opacity == OpaqueOpacity))
    {
      alpha_pixel->red=(MagickRealType) GetPixelRed(pixel);
      alpha_pixel->green=(MagickRealType) GetPixelGreen(pixel);
      alpha_pixel->blue=(MagickRealType) GetPixelBlue(pixel);
      alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
      return;
    }
  alpha=(MagickRealType) (QuantumScale*(QuantumRange-GetPixelOpacity(pixel)));
  alpha_pixel->red=alpha*GetPixelRed(pixel);
  alpha_pixel->green=alpha*GetPixelGreen(pixel);
  alpha_pixel->blue=alpha*GetPixelBlue(pixel);
  alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
}

/*
  Clamp a real value into [0, QuantumRange], rounding to the nearest
  Quantum.
*/
static inline Quantum ClampToUnsignedQuantum(const MagickRealType value)
{
  if (value <= 0.0)
    return((Quantum) 0);
  if (value >= QuantumRange)
    return((Quantum) QuantumRange);
  return((Quantum) (value+0.5));
}

/*
  Extract bit `index' of each channel and pack the bits into a child id:
  bit 0 = red, bit 1 = green, bit 2 = blue, bit 3 = opacity (only when an
  alpha channel participates).  This selects which of the node's 8 (or 16)
  children contains the color.
*/
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const RealPixelPacket *pixel,size_t index)
{
  size_t
    id;

  id=(size_t) (((ScaleQuantumToChar(ClampToUnsignedQuantum(
    GetPixelRed(pixel))) >> index) & 0x01) |
    ((ScaleQuantumToChar(ClampToUnsignedQuantum(GetPixelGreen(pixel))) >>
      index) & 0x01) << 1 |
    ((ScaleQuantumToChar(ClampToUnsignedQuantum(GetPixelBlue(pixel))) >>
      index) & 0x01) << 2);
  if (cube_info->associate_alpha != MagickFalse)
    id|=((ScaleQuantumToChar(ClampToUnsignedQuantum(GetPixelOpacity(pixel)))
      >> index) & 0x1) << 3;
  return(id);
}

/*
  Two pixels are "the same color" when RGB match, and — only for matte
  images — opacity matches as well.
*/
static inline MagickBooleanType IsSameColor(const Image *image,
  const PixelPacket *p,const PixelPacket *q)
{
  if ((GetPixelRed(p) != GetPixelRed(q)) ||
      (GetPixelGreen(p) != GetPixelGreen(q)) ||
      (GetPixelBlue(p) != GetPixelBlue(q)))
    return(MagickFalse);
  if ((image->matte != MagickFalse) &&
      (GetPixelOpacity(p) != GetPixelOpacity(q)))
    return(MagickFalse);
  return(MagickTrue);
}

static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
{
#define AssignImageTag  "Assign/Image"

  ssize_t
    y;

  /*
    Allocate image colormap.  The image is first transformed into the
    quantization colorspace (or RGB when none was requested and the image
    is not already gray/RGB/CMY).
  */
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,
      cube_info->quantize_info->colorspace);
  else
    if ((image->colorspace != GRAYColorspace) &&
        (IsRGBColorspace(image->colorspace) == MagickFalse) &&
        (image->colorspace != CMYColorspace))
      (void) TransformImageColorspace((Image *) image,RGBColorspace);
  if (AcquireImageColormap(image,cube_info->colors) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* DefineImageColormap() rebuilds colors from 0 as it walks the tree. */
  image->colors=0;
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  (void) DefineImageColormap(image,cube_info,cube_info->root);
  /*
    Create a reduced color image.
  */
  if ((cube_info->quantize_info->dither != MagickFalse) &&
      (cube_info->quantize_info->dither_method != NoDitherMethod))
    (void) DitherImage(image,cube_info);
  else
    {
      CacheView
        *image_view;

      ExceptionInfo
        *exception;

      MagickBooleanType
        status;

      status=MagickTrue;
      exception=(&image->exception);
      image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CubeInfo
          cube;

        register IndexPacket
          *restrict indexes;

        register PixelPacket
          *restrict q;

        register ssize_t
          x;

        ssize_t
          count;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        /* each thread works on a private copy of the cube state */
        cube=(*cube_info);
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          RealPixelPacket
            pixel;

          register const NodeInfo
            *node_info;

          register ssize_t
            i;

          size_t
            id,
            index;

          /*
            Identify the deepest node containing the pixel's color.  A run
            of `count' identical pixels is classified once.
          */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
            if (IsSameColor(image,q,q+count) == MagickFalse)
              break;
          AssociateAlphaPixel(&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*
            (QuantumRange+1.0)+1.0);
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(indexes+x+i,index);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRgb(q,image->colormap+index);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelOpacity(q,image->colormap[index].opacity);
              }
            q++;
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_AssignImageColors)
#endif
            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType)
              y,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image);
  if ((cube_info->quantize_info->number_colors == 2) &&
      (cube_info->quantize_info->colorspace == GRAYColorspace))
    {
      Quantum
        intensity;

      register PixelPacket
        *restrict q;

      register ssize_t
        i;

      /*
        Monochrome image: snap each colormap entry to black or white by
        thresholding its intensity at QuantumRange/2.
      */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        intensity=(Quantum) (PixelIntensity(q) < ((MagickRealType)
          QuantumRange/2.0) ? 0 : QuantumRange);
        SetPixelRed(q,intensity);
        SetPixelGreen(q,intensity);
        SetPixelBlue(q,intensity);
        q++;
      }
    }
  (void) SyncImage(image);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,RGBColorspace);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   C l a s s i f y I m a g e C o l o r s                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClassifyImageColors() begins by initializing a color description tree of
%  sufficient depth to represent each possible input color in a leaf.
%  However, it is impractical to generate a fully-formed color description
%  tree in the classification phase for realistic values of Cmax.  If color
%  components in the input image are quantized to k-bit precision, so that
%  Cmax = 2k-1, the tree would need k levels below the root node to allow
%  representing each possible input color in a leaf.  This becomes
%  prohibitive because the tree's total number of nodes is
%  1 + sum(i=1,k,8k).
%
%  A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
%  Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
%  initializes data structures for nodes only as they are needed; (2)
%  chooses a maximum depth for the tree as a function of the desired number
%  of colors in the output image (currently log2(colormap size)).
%
%  For each pixel in the input image, classification scans downward from
%  the root of the color description tree.
At each level of the tree it identifies the single node which represents
%  a cube in RGB space containing the pixel's color.  It updates the
%  following data for each such node:
%
%    n1:  Number of pixels whose color is contained in the RGB cube which
%    this node represents;
%
%    n2:  Number of pixels whose color is not represented in a node at
%    lower depth in the tree; initially, n2 = 0 for all nodes except leaves
%    of the tree.
%
%    Sr, Sg, Sb:  Sums of the red, green, and blue component values for all
%    pixels not classified at a lower depth.  The combination of these sums
%    and n2 will ultimately characterize the mean color of a set of pixels
%    represented by this node.
%
%    E:  the distance squared in RGB space between each pixel contained
%    within a node and the nodes' center.  This represents the quantization
%    error for a node.
%
%  The format of the ClassifyImageColors() method is:
%
%      MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
%        const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o image: the image.
%
*/

/*
  Decide whether opacity participates as a 4th tree dimension: only for
  matte images, and never for transparent-colorspace or 2-color grayscale
  quantization.
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  MagickBooleanType
    associate_alpha;

  associate_alpha=image->matte;
  if (cube_info->quantize_info->colorspace == TransparentColorspace)
    associate_alpha=MagickFalse;
  if ((cube_info->quantize_info->number_colors == 2) &&
      (cube_info->quantize_info->colorspace == GRAYColorspace))
    associate_alpha=MagickFalse;
  cube_info->associate_alpha=associate_alpha;
}

static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag  "Classify/Image"

  CacheView
    *image_view;

  MagickBooleanType
    proceed;

  MagickRealType
    bisect;

  NodeInfo
    *node_info;

  RealPixelPacket
    error,
    mid,
    midpoint,
    pixel;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of
    8; once the tree holds more than maximum_colors colors it is pruned and
    the remaining rows (second loop below) classify only to the reduced
    cube_info->depth.
  */
  SetAssociatedAlpha(image,cube_info);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,
      cube_info->quantize_info->colorspace);
  else
    if ((image->colorspace != GRAYColorspace) &&
        (image->colorspace != CMYColorspace) &&
        (IsRGBColorspace(image->colorspace) == MagickFalse))
      (void) TransformImageColorspace((Image *) image,RGBColorspace);
  midpoint.red=(MagickRealType) QuantumRange/2.0;
  midpoint.green=(MagickRealType) QuantumRange/2.0;
  midpoint.blue=(MagickRealType) QuantumRange/2.0;
  midpoint.opacity=(MagickRealType) QuantumRange/2.0;
  error.opacity=0.0;
  image_view=AcquireCacheView(image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(image,cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree; a run of
        identical pixels is classified once with weight `count'.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
        if (IsSameColor(image,p,p+count) == MagickFalse)
          break;
      AssociateAlphaPixel(cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((MagickRealType) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        /* track the center of the child cube we descend into */
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
              NOTE(review): on allocation failure an exception is recorded
              but the walk continues and dereferences the NULL child below —
              confirm against upstream whether a bail-out is warranted.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              (void) ThrowMagickException(exception,GetMagickModule(),
                ResourceLimitError,"MemoryAllocationFailed","`%s'",
                image->filename);
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
        node_info->quantize_error+=sqrt((double) (count*error.red*error.red+
          count*error.green*error.green+count*error.blue*error.blue+
          count*error.opacity*error.opacity));
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*pixel.red;
      node_info->total_color.green+=count*QuantumScale*pixel.green;
      node_info->total_color.blue+=count*QuantumScale*pixel.blue;
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.opacity+=count*QuantumScale*pixel.opacity;
      p+=count;
    }
    if (cube_info->colors > cube_info->maximum_colors)
      {
        PruneToCubeDepth(image,cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Second pass for the remaining rows: identical to the loop above except
    that descent stops at the (possibly pruned) cube_info->depth instead of
    MaxTreeDepth.
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(image,cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
        if (IsSameColor(image,p,p+count) == MagickFalse)
          break;
      AssociateAlphaPixel(cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((MagickRealType) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              (void) ThrowMagickException(exception,GetMagickModule(),
                ResourceLimitError,"MemoryAllocationFailed","%s",
                image->filename);
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
        node_info->quantize_error+=sqrt((double) (count*error.red*error.red+
          count*error.green*error.green+count*error.blue*error.blue+
          count*error.opacity*error.opacity));
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*pixel.red;
      node_info->total_color.green+=count*QuantumScale*pixel.green;
      node_info->total_color.blue+=count*QuantumScale*pixel.blue;
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.opacity+=count*QuantumScale*pixel.opacity;
      p+=count;
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,RGBColorspace);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C l o n e Q u a n t i z e I n f o                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneQuantizeInfo() makes a duplicate of the given quantize info
%  structure, or if quantize info is NULL, a new one.
%
%  The format of the CloneQuantizeInfo method is:
%
%      QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o clone_info: Method CloneQuantizeInfo returns a duplicate of the
%      given quantize info, or if image info is NULL a new one.
%
%    o quantize_info: a structure of type info.
% */ MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info) { QuantizeInfo *clone_info; clone_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*clone_info)); if (clone_info == (QuantizeInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); GetQuantizeInfo(clone_info); if (quantize_info == (QuantizeInfo *) NULL) return(clone_info); clone_info->number_colors=quantize_info->number_colors; clone_info->tree_depth=quantize_info->tree_depth; clone_info->dither=quantize_info->dither; clone_info->dither_method=quantize_info->dither_method; clone_info->colorspace=quantize_info->colorspace; clone_info->measure_error=quantize_info->measure_error; return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o s e s t C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClosestColor() traverses the color cube tree at a particular node and % determines which colormap entry best represents the input color. % % The format of the ClosestColor method is: % % void ClosestColor(const Image *image,CubeInfo *cube_info, % const NodeInfo *node_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % % o node_info: the address of a structure of type NodeInfo which points to a % node in the color cube tree that is to be pruned. % */ static void ClosestColor(const Image *image,CubeInfo *cube_info, const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 
8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) ClosestColor(image,cube_info,node_info->child[i]); if (node_info->number_unique != 0) { MagickRealType pixel; register MagickRealType alpha, beta, distance; register PixelPacket *restrict p; register RealPixelPacket *restrict q; /* Determine if this color is "closest". */ p=image->colormap+node_info->color_number; q=(&cube_info->target); alpha=1.0; beta=1.0; if (cube_info->associate_alpha != MagickFalse) { alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p)); beta=(MagickRealType) (QuantumScale*GetPixelAlpha(q)); } pixel=alpha*GetPixelRed(p)-beta*GetPixelRed(q); distance=pixel*pixel; if (distance <= cube_info->distance) { pixel=alpha*GetPixelGreen(p)-beta*GetPixelGreen(q); distance+=pixel*pixel; if (distance <= cube_info->distance) { pixel=alpha*GetPixelBlue(p)-beta*GetPixelBlue(q); distance+=pixel*pixel; if (distance <= cube_info->distance) { pixel=alpha-beta; distance+=pixel*pixel; if (distance <= cube_info->distance) { cube_info->distance=distance; cube_info->color_number=node_info->color_number; } } } } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p r e s s I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompressImageColormap() compresses an image colormap by removing any % duplicate or unused color entries. % % The format of the CompressImageColormap method is: % % MagickBooleanType CompressImageColormap(Image *image) % % A description of each parameter follows: % % o image: the image. 
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image)
{
  QuantizeInfo
    quantize_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Only images whose colors fit a palette can be compressed.
  */
  if (IsPaletteImage(image,&image->exception) == MagickFalse)
    return(MagickFalse);
  /*
    Re-quantize at the current color count: rebuilding the colormap drops
    duplicate and unused entries as a side effect.
  */
  GetQuantizeInfo(&quantize_info);
  quantize_info.number_colors=image->colors;
  quantize_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&quantize_info,image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e f i n e I m a g e C o l o r m a p                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineImageColormap() traverses the color cube tree and notes each colormap
%  entry.  A colormap entry is any node in the color cube tree where the
%  number of unique colors is not zero.  DefineImageColormap() returns the
%  number of colors in the image colormap.
%
%  The format of the DefineImageColormap method is:
%
%      size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
%        NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      (void) DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register MagickRealType
        alpha;

      register PixelPacket
        *restrict q;

      /*
        Colormap entry is defined by the mean color in this cube.
      */
      q=image->colormap+image->colors;
      alpha=(MagickRealType) ((MagickOffsetType) node_info->number_unique);
      /* 1/count, guarded against division by ~0. */
      alpha=1.0/(fabs(alpha) <= MagickEpsilon ? 1.0 : alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.red)));
          SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.green)));
          SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.blue)));
          SetPixelOpacity(q,OpaqueOpacity);
        }
      else
        {
          MagickRealType
            opacity;

          opacity=(MagickRealType) (alpha*QuantumRange*
            node_info->total_color.opacity);
          SetPixelOpacity(q,ClampToQuantum(opacity));
          if (q->opacity == OpaqueOpacity)
            {
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.blue)));
            }
          else
            {
              MagickRealType
                gamma;

              /*
                Un-premultiply: colors were accumulated weighted by alpha.
              */
              gamma=(MagickRealType) (QuantumScale*(QuantumRange-
                (MagickRealType) q->opacity));
              gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (
                alpha*gamma*QuantumRange*node_info->total_color.blue)));
              /* Remember the most-populated transparent entry. */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
  return(image->colors);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y C u b e I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyCubeInfo() deallocates memory associated with an image.
%
%  The format of the DestroyCubeInfo method is:
%
%      DestroyCubeInfo(CubeInfo *cube_info)
%
%  A description of each parameter follows:
%
%    o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  register Nodes
    *nodes;

  /*
    Release color cube tree storage.
  */
  do
  {
    /* Walk the singly-linked list of node arenas, freeing each. */
    nodes=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=nodes;
  } while (cube_info->node_queue != (Nodes *) NULL);
  if (cube_info->cache != (ssize_t *) NULL)
    cube_info->cache=(ssize_t *) RelinquishMagickMemory(cube_info->cache);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y Q u a n t i z e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
%  structure.
%
%  The format of the DestroyQuantizeInfo method is:
%
%      QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickSignature);
  /* Invalidate the signature so stale pointers are caught by the asserts. */
  quantize_info->signature=(~MagickSignature);
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i t h e r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DitherImage() distributes the difference between an original image and
%  the corresponding color reduced algorithm to neighboring pixels using
%  serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
%  MagickTrue if the image is dithered otherwise MagickFalse.
%
%  The format of the DitherImage method is:
%
%      MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
*/

/*
  Release the per-thread error-row buffers allocated by
  AcquirePixelThreadSet(); returns NULL for convenient assignment.
*/
static RealPixelPacket **DestroyPixelThreadSet(RealPixelPacket **pixels)
{
  register ssize_t
    i;

  assert(pixels != (RealPixelPacket **) NULL);
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
    if (pixels[i] != (RealPixelPacket *) NULL)
      pixels[i]=(RealPixelPacket *) RelinquishMagickMemory(pixels[i]);
  pixels=(RealPixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}

/*
  Allocate one buffer per OpenMP thread, each large enough for two rows of
  `count' pixels (the current and previous scanline error terms).
*/
static RealPixelPacket **AcquirePixelThreadSet(const size_t count)
{
  RealPixelPacket
    **pixels;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=GetOpenMPMaximumThreads();
  pixels=(RealPixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (RealPixelPacket **) NULL)
    return((RealPixelPacket **) NULL);
  (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    /* 2*count: two interleaved rows selected by (y & 0x01) below. */
    pixels[i]=(RealPixelPacket *) AcquireQuantumMemory(count,
      2*sizeof(**pixels));
    if (pixels[i] == (RealPixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}

/*
  Map a pixel to its slot in the closest-color cache: the top CacheShift
  bits of each channel are packed into a single index; the alpha channel
  participates only when it is associated.
*/
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const RealPixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    offset;

  offset=(ssize_t)
    (RedShift(ScaleQuantumToChar(ClampToUnsignedQuantum(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampToUnsignedQuantum(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampToUnsignedQuantum(pixel->blue))));
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampToUnsignedQuantum(
      pixel->opacity)));
  return(offset);
}

/*
  Serpentine-scan Floyd-Steinberg error diffusion: each pixel's
  quantization error is spread to its unvisited neighbors with the classic
  7/16, 5/16, 3/16, 1/16 weights; scan direction alternates per row.
*/
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info)
{
#define DitherImageTag  "Dither/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  RealPixelPacket
    **pixels;

  ssize_t
    y;

  /*
    Distribute quantization error using Floyd-Steinberg.
  */
  pixels=AcquirePixelThreadSet(image->columns);
  if (pixels == (RealPixelPacket **) NULL)
    return(MagickFalse);
  exception=(&image->exception);
  status=MagickTrue;
  image_view=AcquireCacheView(image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    CubeInfo
      cube;

    RealPixelPacket
      *current,
      *previous;

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    size_t
      index;

    ssize_t
      v;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    /* Work on a private copy of the cube so per-row state stays local. */
    cube=(*cube_info);
    /* Two halves of the thread buffer hold current/previous row errors. */
    current=pixels[id]+(y & 0x01)*image->columns;
    previous=pixels[id]+((y+1) & 0x01)*image->columns;
    /* v is the serpentine step direction: odd rows scan right-to-left. */
    v=(ssize_t) ((y & 0x01) ? -1 : 1);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      RealPixelPacket
        color,
        pixel;

      register ssize_t
        i;

      ssize_t
        u;

      u=(y & 0x01) ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(&cube,q+u,&pixel);
      if (x > 0)
        {
          /* 7/16 of the error from the previously visited pixel. */
          pixel.red+=7*current[u-v].red/16;
          pixel.green+=7*current[u-v].green/16;
          pixel.blue+=7*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.opacity+=7*current[u-v].opacity/16;
        }
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              /* 1/16 from the diagonal neighbor in the previous row. */
              pixel.red+=previous[u+v].red/16;
              pixel.green+=previous[u+v].green/16;
              pixel.blue+=previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.opacity+=previous[u+v].opacity/16;
            }
          /* 5/16 from directly above. */
          pixel.red+=5*previous[u].red/16;
          pixel.green+=5*previous[u].green/16;
          pixel.blue+=5*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.opacity+=5*previous[u].opacity/16;
          if (x > 0)
            {
              /* 3/16 from the other diagonal in the previous row. */
              pixel.red+=3*previous[u-v].red/16;
              pixel.green+=3*previous[u-v].green/16;
              pixel.blue+=3*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.opacity+=3*previous[u-v].opacity/16;
            }
        }
      pixel.red=(MagickRealType) ClampToUnsignedQuantum(pixel.red);
      pixel.green=(MagickRealType) ClampToUnsignedQuantum(pixel.green);
      pixel.blue=(MagickRealType) ClampToUnsignedQuantum(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.opacity=(MagickRealType) ClampToUnsignedQuantum(pixel.opacity);
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            id;  /* NOTE(review): shadows the outer `const int id' above. */

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          /* Sentinel larger than any possible 4-channel squared distance. */
          cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*(QuantumRange+
            1.0)+1.0);
          ClosestColor(image,&cube,node_info->parent);
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(indexes+u,index);
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRgb(q+u,image->colormap+index);
          if (cube.associate_alpha != MagickFalse)
            SetPixelOpacity(q+u,image->colormap[index].opacity);
        }
      /*
        NOTE(review): sync and progress run once per PIXEL here (inside the
        x loop); per-row placement after the x loop looks intended -- confirm.
      */
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      /*
        Store the error.
      */
      AssociateAlphaPixel(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].opacity=pixel.opacity-color.opacity;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_FloydSteinbergDither)
#endif
          proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
            image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelThreadSet(pixels);
  /*
    NOTE(review): `status' accumulates sync/progress failures above but is
    discarded here -- confirm whether return(status) was intended.
  */
  return(MagickTrue);
}

static MagickBooleanType
  RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int);

/*
  Recursively walk a Hilbert curve of order `level' over the image,
  emitting one RiemersmaDither() step per curve move; `direction' selects
  the orientation of the curve cell.
*/
static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info,
  const size_t level,const unsigned int direction)
{
  if (level == 1)
    switch (direction)
    {
      case WestGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        break;
      }
      case EastGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        break;
      }
      case NorthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        break;
      }
      case SouthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        break;
      }
      default:
        break;
    }
  else
    switch (direction)
    {
      case WestGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        break;
      }
      case EastGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        break;
      }
      case NorthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        break;
      }
      case SouthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        break;
      }
      default:
        break;
    }
}

/*
  Quantize the single pixel at the cube's current (x,y) position, applying
  the exponentially-decaying error queue, then advance one step in
  `direction'.  Out-of-image positions only advance the position.
*/
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction)
{
#define DitherImageTag  "Dither/Image"

  MagickBooleanType
    proceed;

  RealPixelPacket
    color,
    pixel;

  register CubeInfo
    *p;

  size_t
    index;

  p=cube_info;
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      ExceptionInfo
        *exception;

      register IndexPacket
        *restrict indexes;

      register PixelPacket
        *restrict q;

      register ssize_t
        i;

      /*
        Distribute error.
      */
      exception=(&image->exception);
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (PixelPacket *) NULL)
        return(MagickFalse);
      indexes=GetCacheViewAuthenticIndexQueue(image_view);
      AssociateAlphaPixel(cube_info,q,&pixel);
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=p->weights[i]*p->error[i].red;
        pixel.green+=p->weights[i]*p->error[i].green;
        pixel.blue+=p->weights[i]*p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.opacity+=p->weights[i]*p->error[i].opacity;
      }
      pixel.red=(MagickRealType) ClampToUnsignedQuantum(pixel.red);
      pixel.green=(MagickRealType) ClampToUnsignedQuantum(pixel.green);
      pixel.blue=(MagickRealType) ClampToUnsignedQuantum(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.opacity=(MagickRealType) ClampToUnsignedQuantum(pixel.opacity);
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            NOTE(review): combined with the ->parent in the ClosestColor()
            call below, the search starts TWO levels above the deepest node;
            FloydSteinbergDither() climbs only one -- confirm intended.
          */
          node_info=node_info->parent;
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          p->distance=(MagickRealType) (4.0*(QuantumRange+1.0)*((MagickRealType)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) (1*p->cache[i]);  /* NOTE(review): `1*' is a no-op. */
      if (image->storage_class == PseudoClass)
        *indexes=(IndexPacket) index;
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRgb(q,image->colormap+index);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelOpacity(q,image->colormap[index].opacity);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue.
      */
      (void) CopyMagickMemory(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixel(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].opacity=pixel.opacity-color.opacity;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}

/* Larger of two signed sizes. */
static inline ssize_t MagickMax(const ssize_t x,const ssize_t y)
{
  if (x > y)
    return(x);
  return(y);
}

/* Smaller of two signed sizes. */
static inline ssize_t MagickMin(const ssize_t x,const ssize_t y)
{
  if (x < y)
    return(x);
  return(y);
}

/*
  Dispatch to the configured dither: Floyd-Steinberg unless the Riemersma
  (Hilbert-curve) method was requested.
*/
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    depth;

  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info));
  /*
    Distribute quantization error along a Hilbert curve.
  */
  (void) ResetMagickMemory(cube_info->error,0,ErrorQueueLength*
    sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  /* depth = smallest curve order whose extent covers the larger dimension. */
  i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
  for (depth=1; i != 0; depth++)
    i>>=1;
  if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t)
      image->rows))
    depth++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireCacheView(image);
  if (depth > 1)
    Riemersma(image,image_view,cube_info,depth-1,NorthGravity);
  /* Final step flushes the last pixel without moving. */
  status=RiemersmaDither(image,image_view,cube_info,ForgetGravity);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t C u b e I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetCubeInfo() initialize the Cube data structure.
%
%  The format of the GetCubeInfo method is:
%
%      CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info,
%        const size_t depth,const size_t maximum_colors)
%
%  A description of each parameter follows.
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o depth: Normally, this integer value is zero or one.  A zero or
%      one tells Quantize to choose a optimal tree depth of Log4(number_colors).
%      A tree of this depth generally allows the best representation of the
%      reference image with the least amount of memory and the fastest
%      computational speed.  In some cases, such as an image with low color
%      dispersion (a few number of colors), a value other than
%      Log4(number_colors) is required.  To expand the color tree completely,
%      use a value of 8.
%
%    o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
  const size_t depth,const size_t maximum_colors)
{
  CubeInfo
    *cube_info;

  MagickRealType
    sum,
    weight;

  register ssize_t
    i;

  size_t
    length;

  /*
    Initialize tree to describe color cube_info.
  */
  cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
  if (cube_info == (CubeInfo *) NULL)
    return((CubeInfo *) NULL);
  (void) ResetMagickMemory(cube_info,0,sizeof(*cube_info));
  /* Clamp the requested depth into [2, MaxTreeDepth]. */
  cube_info->depth=depth;
  if (cube_info->depth > MaxTreeDepth)
    cube_info->depth=MaxTreeDepth;
  if (cube_info->depth < 2)
    cube_info->depth=2;
  cube_info->maximum_colors=maximum_colors;
  /*
    Initialize root node.
  */
  cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
  if (cube_info->root == (NodeInfo *) NULL)
    return((CubeInfo *) NULL);  /* NOTE(review): cube_info leaks on this
      failure path (and on the cache-allocation failure below) -- confirm. */
  /* The root is its own parent, so parent walks never dereference NULL. */
  cube_info->root->parent=cube_info->root;
  cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
  if (cube_info->quantize_info->dither == MagickFalse)
    return(cube_info);
  /*
    Initialize dither resources.
  */
  length=(size_t) (1UL << (4*(8-CacheShift)));
  cube_info->cache=(ssize_t *) AcquireQuantumMemory(length,
    sizeof(*cube_info->cache));
  if (cube_info->cache == (ssize_t *) NULL)
    return((CubeInfo *) NULL);
  /*
    Initialize color cache.
  */
  for (i=0; i < (ssize_t) length; i++)
    cube_info->cache[i]=(-1);
  /*
    Distribute weights along a curve of exponential decay.
  */
  weight=1.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[ErrorQueueLength-i-1]=1.0/weight;
    weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
  }
  /*
    Normalize the weighting factors.
  */
  weight=0.0;
  for (i=0; i < ErrorQueueLength; i++)
    weight+=cube_info->weights[i];
  sum=0.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[i]/=weight;
    sum+=cube_info->weights[i];
  }
  /* Fold any rounding residue into the first weight so the sum is 1. */
  cube_info->weights[0]+=1.0-sum;
  return(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t N o d e I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNodeInfo() allocates memory for a new node in the color cube tree and
%  presets all fields to zero.
%
%  The format of the GetNodeInfo method is:
%
%      NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
%        const size_t level,NodeInfo *parent)
%
%  A description of each parameter follows.
%
%    o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
%    o id: Specifies the child number of the node.
%
%    o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        return((NodeInfo *) NULL);
      /* Push the fresh arena onto the cube's arena list. */
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  cube_info->nodes++;
  cube_info->free_nodes--;
  /* Carve the next node out of the current arena. */
  node_info=cube_info->next_node++;
  (void) ResetMagickMemory(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e Q u a n t i z e E r r o r                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantizeError() measures the difference between the original
%  and quantized images.  This difference is the total quantization error.
%  The error is computed by summing over all pixels in an image the distance
%  squared in RGB space between each reference pixel value and its quantized
%  value.  These values are computed:
%
%    o mean_error_per_pixel:  This value is the mean error for any single
%      pixel in the image.
%
%    o normalized_mean_square_error:  This value is the normalized mean
%      quantization error for any single pixel in the image.  This distance
%      measure is normalized to a range between 0 and 1.  It is independent
%      of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error:  This value is the normalized
%      maximum quantization error for any single pixel in the image.  This
%      distance measure is normalized to a range between 0 and 1.  It is
%      independent of the range of red, green, and blue values in your image.
%
%  The format of the GetImageQuantizeError method is:
%
%      MagickBooleanType GetImageQuantizeError(Image *image)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  IndexPacket
    *indexes;

  MagickRealType
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  size_t
    index;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,&image->exception);
  (void) ResetMagickMemory(&image->error,0,sizeof(image->error));
  /* Error is only meaningful for colormapped images. */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  /* 3 channels per pixel normalize the per-channel sums below. */
  area=3.0*image->columns*image->rows;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=1UL*GetPixelIndex(indexes+x);
      if (image->matte != MagickFalse)
        {
          /* Weight each side by its own alpha before comparing. */
          alpha=(MagickRealType) (QuantumScale*(GetPixelAlpha(p)));
          beta=(MagickRealType) (QuantumScale*(QuantumRange-
            image->colormap[index].opacity));
        }
      distance=fabs(alpha*GetPixelRed(p)-beta*image->colormap[index].red);
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs(alpha*GetPixelGreen(p)-beta*image->colormap[index].green);
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs(alpha*GetPixelBlue(p)-beta*image->colormap[index].blue);
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t Q u a n t i z e I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetQuantizeInfo() initializes the QuantizeInfo structure.
%
%  The format of the GetQuantizeInfo method is:
%
%      GetQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  (void) ResetMagickMemory(quantize_info,0,sizeof(*quantize_info));
  /* Defaults: 256 colors, Riemersma dither, colorspace of the image. */
  quantize_info->number_colors=256;
  quantize_info->dither=MagickTrue;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->signature=MagickSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P o s t e r i z e I m a g e C h a n n e l                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PosterizeImage() reduces the image to a limited number of colors for a
%  "poster" effect.
%
%  The format of the PosterizeImage method is:
%
%      MagickBooleanType PosterizeImage(Image *image,const size_t levels,
%        const MagickBooleanType dither)
%      MagickBooleanType PosterizeImageChannel(Image *image,
%        const ChannelType channel,const size_t levels,
%        const MagickBooleanType dither)
%
%  A description of each parameter follows:
%
%    o image: Specifies a pointer to an Image structure.
%
%    o levels: Number of color levels allowed in each channel.  Very low values
%      (2, 3, or 4) have the most visible effect.
%
%    o dither: Set this integer value to something other than zero to dither
%      the mapped image.
%
*/

static inline ssize_t MagickRound(MagickRealType x)
{
  /*
    Round the fraction to nearest integer.
  */
  if (x >= 0.0)
    return((ssize_t) (x+0.5));
  return((ssize_t) (x-0.5));
}

/* Posterize all default channels; see PosterizeImageChannel(). */
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const MagickBooleanType dither)
{
  MagickBooleanType
    status;

  status=PosterizeImageChannel(image,DefaultChannels,levels,dither);
  return(status);
}

MagickExport MagickBooleanType PosterizeImageChannel(Image *image,
  const ChannelType channel,const size_t levels,const MagickBooleanType dither)
{
#define PosterizeImageTag  "Posterize/Image"
/* Snap a quantum to the nearest of `levels' evenly spaced values. */
#define PosterizePixel(pixel) (Quantum) (QuantumRange*(MagickRound( \
  QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=PosterizePixel(image->colormap[i].red);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=PosterizePixel(image->colormap[i].green);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=PosterizePixel(image->colormap[i].blue);
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=PosterizePixel(image->colormap[i].opacity);
    }
  /*
    Posterize image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,PosterizePixel(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,PosterizePixel(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,PosterizePixel(GetPixelBlue(q)));
      /*
        NOTE(review): `== MagickTrue' is a stricter test than the
        `!= MagickFalse' idiom used elsewhere in this file -- confirm.
      */
      if (((channel & OpacityChannel) != 0) &&
          (image->matte == MagickTrue))
        SetPixelOpacity(q,PosterizePixel(GetPixelOpacity(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,PosterizePixel(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_PosterizeImageChannel)
#endif
        /* progress++ is only mutated inside this critical section. */
        proceed=SetImageProgress(image,PosterizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Reduce to at most levels^3 colors (capped at the colormap limit).
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither=dither;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e C h i l d                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneChild() deletes the given node and merges its statistics into its
%  parent.
%
%  The format of the PruneSubtree method is:
%
%      PruneChild(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneChild(image,cube_info,node_info->child[i]);
  /*
    Merge color statistics into parent.
  */
  parent=node_info->parent;
  parent->number_unique+=node_info->number_unique;
  parent->total_color.red+=node_info->total_color.red;
  parent->total_color.green+=node_info->total_color.green;
  parent->total_color.blue+=node_info->total_color.blue;
  parent->total_color.opacity+=node_info->total_color.opacity;
  /* Unlink; node storage itself lives in the arena, freed in bulk later. */
  parent->child[node_info->id]=(NodeInfo *) NULL;
  cube_info->nodes--;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e L e v e l                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneLevel() deletes all nodes at the bottom level of the color tree merging
%  their color statistics into their parent node.
%
%  The format of the PruneLevel method is:
%
%      PruneLevel(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneLevel(image,cube_info,node_info->child[i]);
  if (node_info->level == cube_info->depth)
    PruneChild(image,cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e T o C u b e D e p t h                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneToCubeDepth() deletes any nodes at a depth greater than
%  cube_info->depth while merging their color statistics into their parent
%  node.
% % The format of the PruneToCubeDepth method is: % % PruneToCubeDepth(const Image *image,CubeInfo *cube_info, % const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneToCubeDepth(const Image *image,CubeInfo *cube_info, const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneToCubeDepth(image,cube_info,node_info->child[i]); if (node_info->level > cube_info->depth) PruneChild(image,cube_info,node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImage() analyzes the colors within a reference image and chooses a % fixed number of colors to represent the image. The goal of the algorithm % is to minimize the color difference between the input and output image while % minimizing the processing time. % % The format of the QuantizeImage method is: % % MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, % Image *image) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. 
% */ static MagickBooleanType DirectToColormapImage(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; register ssize_t i; size_t number_colors; ssize_t y; status=MagickTrue; number_colors=(size_t) (image->columns*image->rows); if (AcquireImageColormap(image,number_colors) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); if (image->colors != number_colors) return(MagickFalse); i=0; image_view=AcquireCacheView(image); for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType proceed; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (const PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { image->colormap[i].red=GetPixelRed(q); image->colormap[i].green=GetPixelGreen(q); image->colormap[i].blue=GetPixelBlue(q); image->colormap[i].opacity=GetPixelOpacity(q); SetPixelIndex(indexes+x,i); i++; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) break; proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, Image *image) { CubeInfo *cube_info; MagickBooleanType status; size_t depth, maximum_colors; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) 
maximum_colors=MaxColormapSize; if ((image->columns*image->rows) <= maximum_colors) (void) DirectToColormapImage(image,&image->exception); if ((IsGrayImage(image,&image->exception) != MagickFalse) && (image->matte == MagickFalse)) (void) SetGrayscaleImage(image); if ((image->storage_class == PseudoClass) && (image->colors <= maximum_colors)) return(MagickTrue); depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if ((quantize_info->dither != MagickFalse) && (depth > 2)) depth--; if ((image->matte != MagickFalse) && (depth > 5)) depth--; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,image,&image->exception); if (status != MagickFalse) { /* Reduce the number of colors in the image. */ ReduceImageColors(image,cube_info); status=AssignImageColors(image,cube_info); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImages() analyzes the colors within a set of reference images and % chooses a fixed number of colors to represent the set. The goal of the % algorithm is to minimize the color difference between the input and output % images while minimizing the processing time. % % The format of the QuantizeImages method is: % % MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, % Image *images) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: Specifies a pointer to a list of Image structures. 
% */ MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, Image *images) { CubeInfo *cube_info; Image *image; MagickBooleanType proceed, status; MagickProgressMonitor progress_monitor; register ssize_t i; size_t depth, maximum_colors, number_images; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickSignature); assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); if (GetNextImageInList(images) == (Image *) NULL) { /* Handle a single image with QuantizeImage. */ status=QuantizeImage(quantize_info,images); return(status); } status=MagickFalse; maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if (quantize_info->dither != MagickFalse) depth--; } /* Initialize color cube. 
*/ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) { (void) ThrowMagickException(&images->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return(MagickFalse); } number_images=GetImageListLength(images); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL, image->client_data); status=ClassifyImageColors(cube_info,image,&image->exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor,image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } if (status != MagickFalse) { /* Reduce the number of colors in an image sequence. */ ReduceImageColors(images,cube_info); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,image->client_data); status=AssignImageColors(image,cube_info); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor, image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Reduce() traverses the color cube tree and prunes any node whose % quantization error falls below a particular threshold. % % The format of the Reduce method is: % % Reduce(const Image *image,CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. 
% % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void Reduce(const Image *image,CubeInfo *cube_info, const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) Reduce(image,cube_info,node_info->child[i]); if (node_info->quantize_error <= cube_info->pruning_threshold) PruneChild(image,cube_info,node_info); else { /* Find minimum pruning threshold. */ if (node_info->number_unique > 0) cube_info->colors++; if (node_info->quantize_error < cube_info->next_threshold) cube_info->next_threshold=node_info->quantize_error; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReduceImageColors() repeatedly prunes the tree until the number of nodes % with n2 > 0 is less than or equal to the maximum number of colors allowed % in the output image. On any given iteration over the tree, it selects % those nodes whose E value is minimal for pruning and merges their % color statistics upward. It uses a pruning threshold, Ep, to govern % node selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. 
% % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors % within the cubic volume which the node represents. This includes n1 - % n2 pixels whose colors should be defined by nodes at a lower level in % the tree. % % The format of the ReduceImageColors method is: % % ReduceImageColors(const Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % */ static void ReduceImageColors(const Image *image,CubeInfo *cube_info) { #define ReduceImageTag "Reduce/Image" MagickBooleanType proceed; MagickOffsetType offset; size_t span; cube_info->next_threshold=0.0; for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; ) { cube_info->pruning_threshold=cube_info->next_threshold; cube_info->next_threshold=cube_info->root->quantize_error-1; cube_info->colors=0; Reduce(image,cube_info,cube_info->root); offset=(MagickOffsetType) span-cube_info->colors; proceed=SetImageProgress(image,ReduceImageTag,offset,span- cube_info->maximum_colors+1); if (proceed == MagickFalse) break; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImage() replaces the colors of an image with the closest color from % a reference image. 
% % The format of the RemapImage method is: % % MagickBooleanType RemapImage(const QuantizeInfo *quantize_info, % Image *image,const Image *remap_image) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. % % o remap_image: the reference image. % */ MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info, Image *image,const Image *remap_image) { CubeInfo *cube_info; MagickBooleanType status; /* Initialize color cube. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(remap_image != (Image *) NULL); assert(remap_image->signature == MagickSignature); cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,&image->exception); if (status != MagickFalse) { /* Classify image colors from the reference image. */ cube_info->quantize_info->number_colors=cube_info->colors; status=AssignImageColors(image,cube_info); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImages() replaces the colors of a sequence of images with the % closest color from a reference image. % % The format of the RemapImage method is: % % MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, % Image *images,Image *remap_image) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: the image sequence. % % o remap_image: the reference image. 
% */ MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, Image *images,const Image *remap_image) { CubeInfo *cube_info; Image *image; MagickBooleanType status; assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); image=images; if (remap_image == (Image *) NULL) { /* Create a global colormap for an image sequence. */ status=QuantizeImages(quantize_info,images); return(status); } /* Classify image colors from the reference image. */ cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,&image->exception); if (status != MagickFalse) { /* Classify image colors from the reference image. */ cube_info->quantize_info->number_colors=cube_info->colors; image=images; for ( ; image != (Image *) NULL; image=GetNextImageInList(image)) { status=AssignImageColors(image,cube_info); if (status == MagickFalse) break; } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t G r a y s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetGrayscaleImage() converts an image to a PseudoClass grayscale image. % % The format of the SetGrayscaleImage method is: % % MagickBooleanType SetGrayscaleImage(Image *image) % % A description of each parameter follows: % % o image: The image. 
% */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { PixelPacket *color_1, *color_2; ssize_t intensity; color_1=(PixelPacket *) x; color_2=(PixelPacket *) y; intensity=PixelIntensityToQuantum(color_1)-(ssize_t) PixelIntensityToQuantum(color_2); return((int) intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static MagickBooleanType SetGrayscaleImage(Image *image) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; PixelPacket *colormap; register ssize_t i; ssize_t *colormap_index, j, y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->type != GrayscaleType) (void) TransformImageColorspace(image,GRAYColorspace); colormap_index=(ssize_t *) AcquireQuantumMemory(MaxMap+1, sizeof(*colormap_index)); if (colormap_index == (ssize_t *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); if (image->storage_class != PseudoClass) { ExceptionInfo *exception; for (i=0; i <= (ssize_t) MaxMap; i++) colormap_index[i]=(-1); if (AcquireImageColormap(image,MaxMap+1) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); image->colors=0; status=MagickTrue; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register const PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { register size_t intensity; intensity=ScaleQuantumToMap(GetPixelRed(q)); if (colormap_index[intensity] < 0) 
{ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SetGrayscaleImage) #endif if (colormap_index[intensity] < 0) { colormap_index[intensity]=(ssize_t) image->colors; image->colormap[image->colors].red=GetPixelRed(q); image->colormap[image->colors].green=GetPixelGreen(q); image->colormap[image->colors].blue=GetPixelBlue(q); image->colors++; } } SetPixelIndex(indexes+x,colormap_index[intensity]); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); } for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].opacity=(unsigned short) i; qsort((void *) image->colormap,image->colors,sizeof(PixelPacket), IntensityCompare); colormap=(PixelPacket *) AcquireQuantumMemory(image->colors, sizeof(*colormap)); if (colormap == (PixelPacket *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); j=0; colormap[j]=image->colormap[0]; for (i=0; i < (ssize_t) image->colors; i++) { if (IsSameColor(image,&colormap[j],&image->colormap[i]) == MagickFalse) { j++; colormap[j]=image->colormap[i]; } colormap_index[(ssize_t) image->colormap[i].opacity]=j; } image->colors=(size_t) (j+1); image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap); image->colormap=colormap; status=MagickTrue; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register const PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) 
SetPixelIndex(indexes+x,colormap_index[ScaleQuantumToMap(GetPixelIndex( indexes+x))]); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); image->type=GrayscaleType; if (IsMonochromeImage(image,&image->exception) != MagickFalse) image->type=BilevelType; return(status); }
GB_unop__ceil_fp64_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__ceil_fp64_fp64) // op(A') function: GB (_unop_tran__ceil_fp64_fp64) // C type: double // A type: double // cast: double cij = aij // unaryop: cij = ceil (aij) #define GB_ATYPE \ double #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = ceil (x) ; // casting #define GB_CAST(z, aij) \ double z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = aij ; \ Cx [pC] = ceil (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_CEIL || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__ceil_fp64_fp64) ( double *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p 
< anz ; p++) { double aij = Ax [p] ; double z = aij ; Cx [p] = ceil (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; double z = aij ; Cx [p] = ceil (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__ceil_fp64_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
parallel_for.h
/*! * Copyright (c) 2021 by Contributors * \file runtime/container.h * \brief Defines the container object data structures. */ #ifndef DGL_RUNTIME_PARALLEL_FOR_H_ #define DGL_RUNTIME_PARALLEL_FOR_H_ #include <dmlc/omp.h> #include <algorithm> #include <string> #include <cstdlib> namespace { int64_t divup(int64_t x, int64_t y) { return (x + y - 1) / y; } } namespace dgl { namespace runtime { namespace { size_t compute_num_threads(size_t begin, size_t end, size_t grain_size) { if (omp_in_parallel() || end - begin <= grain_size || end - begin == 1) return 1; return std::min(static_cast<int64_t>(omp_get_max_threads()), divup(end - begin, grain_size)); } struct DefaultGrainSizeT { size_t grain_size; DefaultGrainSizeT() { auto var = std::getenv("DGL_PARALLEL_FOR_GRAIN_SIZE"); if (!var) { grain_size = 1; } else { grain_size = std::stoul(var); } } size_t operator()() { return grain_size; } }; } // namespace static DefaultGrainSizeT default_grain_size; /*! * \brief OpenMP-based parallel for loop. * * It requires each thread's workload to have at least \a grain_size elements. * The loop body will be a function that takes in a single argument \a i, which * stands for the index of the workload. */ template <typename F> void parallel_for( const size_t begin, const size_t end, const size_t grain_size, F&& f) { if (begin >= end) { return; } #ifdef _OPENMP auto num_threads = compute_num_threads(begin, end, grain_size); #pragma omp parallel num_threads(num_threads) { auto tid = omp_get_thread_num(); auto chunk_size = divup((end - begin), num_threads); auto begin_tid = begin + tid * chunk_size; if (begin_tid < end) { for (auto i = begin_tid; i < std::min(end, chunk_size + begin_tid); i++) { f(i); } } } #else for (auto i = begin; i < end; i++) f(i); #endif } /*! * \brief OpenMP-based parallel for loop with default grain size. * * parallel_for with grain size to default value, either 1 or controlled through * environment variable DGL_PARALLEL_FOR_GRAIN_SIZE. 
* If grain size is set to 1, the function behaves the same way as OpenMP * parallel for pragma with static scheduling. */ template <typename F> void parallel_for( const size_t begin, const size_t end, F&& f) { parallel_for(begin, end, default_grain_size(), f); } } // namespace runtime } // namespace dgl #endif // DGL_RUNTIME_PARALLEL_FOR_H_